summaryrefslogtreecommitdiffstats
path: root/ansible_collections/community/docker
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 16:03:42 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 16:03:42 +0000
commit66cec45960ce1d9c794e9399de15c138acb18aed (patch)
tree59cd19d69e9d56b7989b080da7c20ef1a3fe2a5a /ansible_collections/community/docker
parentInitial commit. (diff)
downloadansible-66cec45960ce1d9c794e9399de15c138acb18aed.tar.xz
ansible-66cec45960ce1d9c794e9399de15c138acb18aed.zip
Adding upstream version 7.3.0+dfsg.upstream/7.3.0+dfsgupstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/community/docker')
-rw-r--r--ansible_collections/community/docker/.azure-pipelines/README.md9
-rw-r--r--ansible_collections/community/docker/.azure-pipelines/azure-pipelines.yml285
-rwxr-xr-xansible_collections/community/docker/.azure-pipelines/scripts/aggregate-coverage.sh28
-rwxr-xr-xansible_collections/community/docker/.azure-pipelines/scripts/combine-coverage.py64
-rwxr-xr-xansible_collections/community/docker/.azure-pipelines/scripts/process-results.sh28
-rwxr-xr-xansible_collections/community/docker/.azure-pipelines/scripts/publish-codecov.py105
-rwxr-xr-xansible_collections/community/docker/.azure-pipelines/scripts/report-coverage.sh23
-rwxr-xr-xansible_collections/community/docker/.azure-pipelines/scripts/run-tests.sh38
-rwxr-xr-xansible_collections/community/docker/.azure-pipelines/scripts/time-command.py29
-rw-r--r--ansible_collections/community/docker/.azure-pipelines/templates/coverage.yml43
-rw-r--r--ansible_collections/community/docker/.azure-pipelines/templates/matrix.yml59
-rw-r--r--ansible_collections/community/docker/.azure-pipelines/templates/test.yml49
-rw-r--r--ansible_collections/community/docker/.github/dependabot.yml11
-rw-r--r--ansible_collections/community/docker/.github/patchback.yml9
-rw-r--r--ansible_collections/community/docker/.github/workflows/ansible-test.yml150
-rw-r--r--ansible_collections/community/docker/.github/workflows/docs-pr.yml92
-rw-r--r--ansible_collections/community/docker/.github/workflows/docs-push.yml52
-rw-r--r--ansible_collections/community/docker/.github/workflows/ee.yml118
-rw-r--r--ansible_collections/community/docker/.github/workflows/reuse.yml32
-rw-r--r--ansible_collections/community/docker/.reuse/dep55
-rw-r--r--ansible_collections/community/docker/CHANGELOG.rst895
-rw-r--r--ansible_collections/community/docker/CHANGELOG.rst.license3
-rw-r--r--ansible_collections/community/docker/COPYING674
-rw-r--r--ansible_collections/community/docker/FILES.json5206
-rw-r--r--ansible_collections/community/docker/LICENSES/Apache-2.0.txt191
-rw-r--r--ansible_collections/community/docker/LICENSES/GPL-3.0-or-later.txt674
-rw-r--r--ansible_collections/community/docker/MANIFEST.json33
-rw-r--r--ansible_collections/community/docker/README.md135
-rw-r--r--ansible_collections/community/docker/changelogs/changelog.yaml1141
-rw-r--r--ansible_collections/community/docker/changelogs/changelog.yaml.license3
-rw-r--r--ansible_collections/community/docker/changelogs/config.yaml34
-rw-r--r--ansible_collections/community/docker/changelogs/fragments/.keep0
-rw-r--r--ansible_collections/community/docker/docs/docsite/extra-docs.yml9
-rw-r--r--ansible_collections/community/docker/docs/docsite/links.yml27
-rw-r--r--ansible_collections/community/docker/docs/docsite/rst/scenario_guide.rst232
-rw-r--r--ansible_collections/community/docker/meta/ee-bindep.txt3
-rw-r--r--ansible_collections/community/docker/meta/ee-requirements.txt14
-rw-r--r--ansible_collections/community/docker/meta/execution-environment.yml9
-rw-r--r--ansible_collections/community/docker/meta/runtime.yml32
-rw-r--r--ansible_collections/community/docker/plugins/action/docker_container_copy_into.py40
-rw-r--r--ansible_collections/community/docker/plugins/connection/docker.py452
-rw-r--r--ansible_collections/community/docker/plugins/connection/docker_api.py338
-rw-r--r--ansible_collections/community/docker/plugins/connection/nsenter.py239
-rw-r--r--ansible_collections/community/docker/plugins/doc_fragments/attributes.py96
-rw-r--r--ansible_collections/community/docker/plugins/doc_fragments/docker.py297
-rw-r--r--ansible_collections/community/docker/plugins/inventory/docker_containers.py351
-rw-r--r--ansible_collections/community/docker/plugins/inventory/docker_machine.py275
-rw-r--r--ansible_collections/community/docker/plugins/inventory/docker_swarm.py264
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/_import_helper.py97
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/api/client.py606
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/api/daemon.py196
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/auth.py388
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/constants.py50
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/credentials/constants.py16
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/credentials/errors.py38
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/credentials/store.py119
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/credentials/utils.py62
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/errors.py223
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/tls.py122
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/transport/basehttpadapter.py20
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/transport/npipeconn.py119
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/transport/npipesocket.py236
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/transport/sshconn.py275
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py73
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/transport/unixconn.py123
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/types/daemon.py83
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/utils/build.py305
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/utils/config.py78
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/utils/decorators.py59
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/utils/fnmatch.py127
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/utils/json_stream.py89
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/utils/ports.py95
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/utils/proxy.py85
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py193
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py524
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/_scramble.py56
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/common.py693
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/common_api.py591
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/copy.py442
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/image_archive.py157
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/module_container/base.py1204
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py1353
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/module_container/module.py843
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/socket_handler.py210
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/socket_helper.py62
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/swarm.py281
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/util.py394
-rw-r--r--ansible_collections/community/docker/plugins/module_utils/version.py24
-rw-r--r--ansible_collections/community/docker/plugins/modules/current_container_facts.py145
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_compose.py1220
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_config.py434
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_container.py1288
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py870
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_container_exec.py305
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_container_info.py144
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_host_info.py383
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_image.py1033
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_image_info.py273
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_image_load.py194
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_login.py451
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_network.py679
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_network_info.py140
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_node.py306
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_node_info.py162
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_plugin.py392
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_prune.py275
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_secret.py406
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_stack.py309
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_stack_info.py88
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py98
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_swarm.py725
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_swarm_info.py388
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_swarm_service.py2866
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py121
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_volume.py312
-rw-r--r--ansible_collections/community/docker/plugins/modules/docker_volume_info.py127
-rw-r--r--ansible_collections/community/docker/plugins/plugin_utils/common.py41
-rw-r--r--ansible_collections/community/docker/plugins/plugin_utils/common_api.py40
-rw-r--r--ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py18
-rw-r--r--ansible_collections/community/docker/tests/config.yml9
-rw-r--r--ansible_collections/community/docker/tests/ee/all.yml20
-rw-r--r--ansible_collections/community/docker/tests/ee/roles/current_container_facts/tasks/main.yml32
-rw-r--r--ansible_collections/community/docker/tests/ee/roles/docker_plain/tasks/main.yml32
-rw-r--r--ansible_collections/community/docker/tests/ee/roles/docker_stack/tasks/main.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection/aliases5
-rwxr-xr-xansible_collections/community/docker/tests/integration/targets/connection/test.sh16
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection/test_connection.yml48
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_docker/aliases7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_docker/meta/main.yml7
-rwxr-xr-xansible_collections/community/docker/tests/integration/targets/connection_docker/runme-connection.sh23
-rwxr-xr-xansible_collections/community/docker/tests/integration/targets/connection_docker/runme.sh65
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_docker/setup.yml14
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_docker/shutdown.yml20
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_docker_api/aliases7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_docker_api/meta/main.yml7
-rwxr-xr-xansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme-connection.sh23
-rwxr-xr-xansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme.sh65
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_docker_api/setup.yml14
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_docker_api/shutdown.yml20
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_nsenter/aliases8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_nsenter/meta/main.yml7
-rwxr-xr-xansible_collections/community/docker/tests/integration/targets/connection_nsenter/runme-connection.sh23
-rwxr-xr-xansible_collections/community/docker/tests/integration/targets/connection_nsenter/runme.sh73
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_nsenter/setup.yml14
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_nsenter/shutdown.yml20
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/connection_posix/aliases6
-rwxr-xr-xansible_collections/community/docker/tests/integration/targets/connection_posix/test.sh23
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/current_container_facts/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/current_container_facts/tasks/main.yml41
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_compose/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_compose/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/main.yml47
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/run-test.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/tests/options.yml243
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/tests/start-stop.yml233
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_config/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_config/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/test_docker_config.yml334
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/files/env-file6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py21
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/main.yml65
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/run-test.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/comparisons.yml467
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/compatibility.yml122
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/image-ids.yml155
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml558
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/network.yml747
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/options.yml4696
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/ports.yml326
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml38
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/start-stop.yml459
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/update.yml212
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/main.yml45
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/run-test.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/tests/content.yml1197
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/tests/file.yml1065
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_exec/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_exec/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_exec/tasks/main.yml228
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_info/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_info/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_container_info/tasks/main.yml84
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_host_info/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_host_info/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/test_host_info.yml364
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/main.yml13
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/run-test.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/test.yml54
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/basic.yml139
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/docker_image.yml259
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/options.yml446
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/templates/ArgsDockerfile13
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/templates/Dockerfile7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/templates/EtcHostsDockerfile7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/templates/MyDockerfile9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image/templates/StagedDockerfile11
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image_info/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image_info/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image_info/tasks/main.yml63
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image_load/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image_load/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/main.yml13
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/run-test.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/test.yml38
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/tests/basic.yml217
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_login/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_login/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/main.yml13
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/run-test.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/test.yml13
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/docker_login.yml150
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/multiple-servers.yml61
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/main.yml52
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/run-test.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/basic.yml138
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/ipam.yml309
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/options.yml234
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/overlay.yml62
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/substring.yml41
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network_info/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network_info/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_network_info/tasks/main.yml80
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_node/aliases7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_node/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/main.yml41
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/test_node.yml844
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_node_info/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_node_info/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/test_node_info.yml92
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_plugin/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_plugin/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/main.yml34
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/run-test.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/tests/basic.yml192
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/tests/basic_with_alias.yml83
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_prune/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_prune/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_prune/tasks/main.yml153
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_secret/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_secret/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/test_secrets.yml222
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/test_stack.yml117
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_base.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack/vars/main.yml20
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_info/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_info/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml78
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_info/vars/main.yml20
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml88
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/vars/main.yml20
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm/aliases7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm/meta/main.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/cleanup.yml38
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/main.yml28
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/run-test.yml4
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/basic.yml163
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml133
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options.yml1163
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml95
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml194
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-16
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-26
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/main.yml83
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/run-test.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml463
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml138
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml117
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml606
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml453
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/options.yml2005
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml261
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml196
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml196
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml342
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml461
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml350
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/vars/main.yml60
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml85
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_volume/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_volume/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/main.yml34
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/run-test.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/tests/basic.yml181
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_volume_info/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_volume_info/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/docker_volume_info/tasks/main.yml77
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/files/nginx.conf50
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/filter_plugins/filter_attr.py20
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/meta/main.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/tasks/main.yml195
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/vars/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/aliases9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/tasks/main.yml90
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/aliases7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_1.docker.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_2.docker.yml11
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_cleanup.yml26
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_setup.yml26
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_1.yml40
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_2.yml49
-rwxr-xr-xansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/runme.sh25
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/aliases8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/docker-machine24
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml22
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/setup.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml55
-rwxr-xr-xansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/runme.sh71
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/aliases7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml22
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml19
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml62
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml39
-rwxr-xr-xansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/runme.sh25
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/aliases5
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/defaults/main.yml23
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/handlers/main.yml19
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Alpine.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Archlinux.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Debian.yml50
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Fedora.yml28
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-7.yml46
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-8.yml39
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-9.yml39
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Suse.yml12
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/main.yml179
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Debian.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Fedora.yml4
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-7.yml13
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-8.yml17
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-9.yml17
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Suse.yml12
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Ubuntu-14.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Ubuntu-22.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/default.yml4
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.env6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/defaults/main.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Alpine.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Archlinux.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Debian.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Fedora.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-7.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-8.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-9.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Suse.yml12
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/main.yml16
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/setup.yml59
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/CentOS-8.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-7.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-8.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-9.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Suse-py2.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Suse-py3.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu-16.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu-18.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/default.yml4
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/defaults/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Alpine.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Archlinux.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Debian.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Fedora.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-7.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-8.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-9.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Suse.yml12
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/main.yml16
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/setup.yml50
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Alpine.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Archlinux.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Fedora.yml6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/default.yml4
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/aliases6
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.conf50
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.htpasswd5
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/cleanup.yml59
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/meta/main.yml9
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/main.yml13
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml120
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup.yml84
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/vars/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_epel/tasks/main.yml15
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_openssl/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_openssl/tasks/main.yml35
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Alpine.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Archlinux.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Debian.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/FreeBSD.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/RedHat.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Suse.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_paramiko/meta/main.yml8
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_paramiko/tasks/main.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_pkg_mgr/tasks/main.yml28
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/aliases5
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/meta/main.yml7
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/tasks/main.yml18
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml10
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml16
-rw-r--r--ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml20
-rw-r--r--ansible_collections/community/docker/tests/requirements.yml12
-rw-r--r--ansible_collections/community/docker/tests/sanity/extra/extra-docs.json10
-rw-r--r--ansible_collections/community/docker/tests/sanity/extra/extra-docs.json.license3
-rwxr-xr-xansible_collections/community/docker/tests/sanity/extra/extra-docs.py24
-rw-r--r--ansible_collections/community/docker/tests/sanity/extra/licenses.json4
-rw-r--r--ansible_collections/community/docker/tests/sanity/extra/licenses.json.license3
-rwxr-xr-xansible_collections/community/docker/tests/sanity/extra/licenses.py110
-rw-r--r--ansible_collections/community/docker/tests/sanity/extra/licenses.py.license3
-rw-r--r--ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.json7
-rw-r--r--ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.json.license3
-rwxr-xr-xansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.py44
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.10.txt11
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.10.txt.license3
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.11.txt11
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.11.txt.license3
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.12.txt3
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.12.txt.license3
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.13.txt2
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.13.txt.license3
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.14.txt2
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.14.txt.license3
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.15.txt2
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.15.txt.license3
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.9.txt10
-rw-r--r--ansible_collections/community/docker/tests/sanity/ignore-2.9.txt.license3
-rw-r--r--ansible_collections/community/docker/tests/unit/compat/__init__.py0
-rw-r--r--ansible_collections/community/docker/tests/unit/compat/builtins.py20
-rw-r--r--ansible_collections/community/docker/tests/unit/compat/mock.py30
-rw-r--r--ansible_collections/community/docker/tests/unit/compat/unittest.py25
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/connection/test_docker.py57
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/inventory/test_docker_containers.py214
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/api/test_client.py702
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/fake_api.py668
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/fake_stat.py145
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/test_auth.py819
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/test_errors.py141
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/transport/test_sshconn.py57
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/transport/test_ssladapter.py96
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_build.py515
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_config.py141
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_decorators.py54
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_json_stream.py77
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_ports.py162
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_proxy.py100
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_utils.py488
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/ca.pem7
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/cert.pem7
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/key.pem7
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/test__scramble.py28
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/test_copy.py77
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/test_image_archive.py94
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/module_utils/test_util.py522
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/modules/conftest.py32
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_image.py114
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_network.py35
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_swarm_service.py514
-rw-r--r--ansible_collections/community/docker/tests/unit/plugins/test_support/docker_image_archive_stubbing.py76
-rw-r--r--ansible_collections/community/docker/tests/unit/requirements.txt9
-rw-r--r--ansible_collections/community/docker/tests/utils/constraints.txt25
-rwxr-xr-xansible_collections/community/docker/tests/utils/shippable/alpine.sh45
-rwxr-xr-xansible_collections/community/docker/tests/utils/shippable/fedora.sh45
-rwxr-xr-xansible_collections/community/docker/tests/utils/shippable/linux-community.sh22
-rwxr-xr-xansible_collections/community/docker/tests/utils/shippable/linux.sh21
-rwxr-xr-xansible_collections/community/docker/tests/utils/shippable/remote.sh45
-rwxr-xr-xansible_collections/community/docker/tests/utils/shippable/rhel.sh45
-rwxr-xr-xansible_collections/community/docker/tests/utils/shippable/sanity.sh27
-rwxr-xr-xansible_collections/community/docker/tests/utils/shippable/shippable.sh233
-rwxr-xr-xansible_collections/community/docker/tests/utils/shippable/ubuntu.sh45
-rwxr-xr-xansible_collections/community/docker/tests/utils/shippable/units.sh29
517 files changed, 72755 insertions, 0 deletions
diff --git a/ansible_collections/community/docker/.azure-pipelines/README.md b/ansible_collections/community/docker/.azure-pipelines/README.md
new file mode 100644
index 00000000..9e8ad741
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/README.md
@@ -0,0 +1,9 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+## Azure Pipelines Configuration
+
+Please see the [Documentation](https://github.com/ansible/community/wiki/Testing:-Azure-Pipelines) for more information.
diff --git a/ansible_collections/community/docker/.azure-pipelines/azure-pipelines.yml b/ansible_collections/community/docker/.azure-pipelines/azure-pipelines.yml
new file mode 100644
index 00000000..89182196
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/azure-pipelines.yml
@@ -0,0 +1,285 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+trigger:
+ batch: true
+ branches:
+ include:
+ - main
+ - stable-*
+
+pr:
+ autoCancel: true
+ branches:
+ include:
+ - main
+ - stable-*
+
+schedules:
+ - cron: 0 9 * * *
+ displayName: Nightly
+ always: true
+ branches:
+ include:
+ - main
+ - cron: 0 12 * * 0
+ displayName: Weekly (old stable branches)
+ always: true
+ branches:
+ include:
+ - stable-2
+
+variables:
+ - name: checkoutPath
+ value: ansible_collections/community/docker
+ - name: coverageBranches
+ value: main
+ - name: pipelinesCoverage
+ value: coverage
+ - name: entryPoint
+ value: tests/utils/shippable/shippable.sh
+ - name: fetchDepth
+ value: 0
+
+resources:
+ containers:
+ - container: default
+ image: quay.io/ansible/azure-pipelines-test-container:3.0.0
+
+pool: Standard
+
+stages:
+
+### Sanity & units
+ - stage: Ansible_devel
+ displayName: Sanity & Units devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ targets:
+ - name: Sanity
+ test: 'devel/sanity/1'
+ - name: Sanity Extra # Only on devel
+ test: 'devel/sanity/extra'
+ - name: Units
+ test: 'devel/units/1'
+ - stage: Ansible_2_14
+ displayName: Sanity & Units 2.14
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ targets:
+ - name: Sanity
+ test: '2.14/sanity/1'
+ - name: Units
+ test: '2.14/units/1'
+ - stage: Ansible_2_13
+ displayName: Sanity & Units 2.13
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ targets:
+ - name: Sanity
+ test: '2.13/sanity/1'
+ - name: Units
+ test: '2.13/units/1'
+ - stage: Ansible_2_12
+ displayName: Sanity & Units 2.12
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ targets:
+ - name: Sanity
+ test: '2.12/sanity/1'
+ - name: Units
+ test: '2.12/units/1'
+
+### Docker
+ - stage: Docker_devel
+ displayName: Docker devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/linux/{0}
+ targets:
+ - name: CentOS 7
+ test: centos7
+ - name: Fedora 37
+ test: fedora37
+ - name: openSUSE 15
+ test: opensuse15
+ - name: Ubuntu 20.04
+ test: ubuntu2004
+ - name: Ubuntu 22.04
+ test: ubuntu2204
+ - name: Alpine 3
+ test: alpine3
+ groups:
+ - 4
+ - 5
+ - stage: Docker_2_14
+ displayName: Docker 2.14
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.14/linux/{0}
+ targets:
+ - name: Fedora 36
+ test: fedora36
+ groups:
+ - 4
+ - 5
+ - stage: Docker_2_13
+ displayName: Docker 2.13
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.13/linux/{0}
+ targets:
+ - name: CentOS 7
+ test: centos7
+ - name: Fedora 35
+ test: fedora35
+ - name: openSUSE 15 py2
+ test: opensuse15py2
+ - name: Alpine 3
+ test: alpine3
+ groups:
+ - 4
+ - 5
+ - stage: Docker_2_12
+ displayName: Docker 2.12
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.12/linux/{0}
+ targets:
+ - name: Fedora 33
+ test: fedora33
+ - name: Fedora 34
+ test: fedora34
+ - name: Ubuntu 18.04
+ test: ubuntu1804
+ groups:
+ - 4
+ - 5
+
+### Community Docker
+ - stage: Docker_community_devel
+ displayName: Docker (community images) devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/linux-community/{0}
+ targets:
+ - name: Debian Bullseye
+ test: debian-bullseye/3.9
+ - name: ArchLinux
+ test: archlinux/3.10
+ - name: CentOS Stream 8 with Python 3.6
+ test: centos-stream8/3.6
+ - name: CentOS Stream 8 with Python 3.9
+ test: centos-stream8/3.9
+ groups:
+ - 4
+ - 5
+
+### Remote
+ - stage: Remote_devel
+ displayName: Remote devel
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: devel/{0}
+ targets:
+ - name: RHEL 7.9
+ test: rhel/7.9
+ - name: RHEL 9.1 with latest Docker SDK from PyPi
+ test: rhel/9.1-pypi-latest
+ groups:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ - stage: Remote_2_14
+ displayName: Remote 2.14
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.14/{0}
+ targets:
+ - name: RHEL 9.0
+ test: rhel/9.0
+ groups:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ - stage: Remote_2_13
+ displayName: Remote 2.13
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.13/{0}
+ targets:
+ - name: RHEL 8.5
+ test: rhel/8.5
+ groups:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+ - stage: Remote_2_12
+ displayName: Remote 2.12
+ dependsOn: []
+ jobs:
+ - template: templates/matrix.yml
+ parameters:
+ testFormat: 2.12/{0}
+ targets:
+ - name: RHEL 8.4
+ test: rhel/8.4
+ groups:
+ - 1
+ - 2
+ - 3
+ - 4
+ - 5
+
+ ## Finally
+
+ - stage: Summary
+ condition: succeededOrFailed()
+ dependsOn:
+ - Ansible_devel
+ - Ansible_2_14
+ - Ansible_2_13
+ - Ansible_2_12
+ - Remote_devel
+ - Remote_2_14
+ - Remote_2_13
+ - Remote_2_12
+ - Docker_devel
+ - Docker_2_14
+ - Docker_2_13
+ - Docker_2_12
+ - Docker_community_devel
+ jobs:
+ - template: templates/coverage.yml
diff --git a/ansible_collections/community/docker/.azure-pipelines/scripts/aggregate-coverage.sh b/ansible_collections/community/docker/.azure-pipelines/scripts/aggregate-coverage.sh
new file mode 100755
index 00000000..0ccef353
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/scripts/aggregate-coverage.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+# Aggregate code coverage results for later processing.
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eu
+
+agent_temp_directory="$1"
+
+PATH="${PWD}/bin:${PATH}"
+
+mkdir "${agent_temp_directory}/coverage/"
+
+if [[ "$(ansible --version)" =~ \ 2\.9\. ]]; then
+ exit
+fi
+
+options=(--venv --venv-system-site-packages --color -v)
+
+ansible-test coverage combine --group-by command --export "${agent_temp_directory}/coverage/" "${options[@]}"
+
+if ansible-test coverage analyze targets generate --help >/dev/null 2>&1; then
+ # Only analyze coverage if the installed version of ansible-test supports it.
+ # Doing so allows this script to work unmodified for multiple Ansible versions.
+ ansible-test coverage analyze targets generate "${agent_temp_directory}/coverage/coverage-analyze-targets.json" "${options[@]}"
+fi
diff --git a/ansible_collections/community/docker/.azure-pipelines/scripts/combine-coverage.py b/ansible_collections/community/docker/.azure-pipelines/scripts/combine-coverage.py
new file mode 100755
index 00000000..3b2fd993
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/scripts/combine-coverage.py
@@ -0,0 +1,64 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""
+Combine coverage data from multiple jobs, keeping the data only from the most recent attempt from each job.
+Coverage artifacts must be named using the format: "Coverage $(System.JobAttempt) {StableUniqueNameForEachJob}"
+The recommended coverage artifact name format is: Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)
+Keep in mind that Azure Pipelines does not enforce unique job display names (only names).
+It is up to pipeline authors to avoid name collisions when deviating from the recommended format.
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import re
+import shutil
+import sys
+
+
+def main():
+ """Main program entry point."""
+ source_directory = sys.argv[1]
+
+ if '/ansible_collections/' in os.getcwd():
+ output_path = "tests/output"
+ else:
+ output_path = "test/results"
+
+ destination_directory = os.path.join(output_path, 'coverage')
+
+ if not os.path.exists(destination_directory):
+ os.makedirs(destination_directory)
+
+ jobs = {}
+ count = 0
+
+ for name in os.listdir(source_directory):
+ match = re.search('^Coverage (?P<attempt>[0-9]+) (?P<label>.+)$', name)
+ label = match.group('label')
+ attempt = int(match.group('attempt'))
+ jobs[label] = max(attempt, jobs.get(label, 0))
+
+ for label, attempt in jobs.items():
+ name = 'Coverage {attempt} {label}'.format(label=label, attempt=attempt)
+ source = os.path.join(source_directory, name)
+ source_files = os.listdir(source)
+
+ for source_file in source_files:
+ source_path = os.path.join(source, source_file)
+ destination_path = os.path.join(destination_directory, source_file + '.' + label)
+ print('"%s" -> "%s"' % (source_path, destination_path))
+ shutil.copyfile(source_path, destination_path)
+ count += 1
+
+ print('Coverage file count: %d' % count)
+ print('##vso[task.setVariable variable=coverageFileCount]%d' % count)
+ print('##vso[task.setVariable variable=outputPath]%s' % output_path)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/.azure-pipelines/scripts/process-results.sh b/ansible_collections/community/docker/.azure-pipelines/scripts/process-results.sh
new file mode 100755
index 00000000..1a5d79ff
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/scripts/process-results.sh
@@ -0,0 +1,28 @@
+#!/usr/bin/env bash
+# Check the test results and set variables for use in later steps.
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eu
+
+if [[ "$PWD" =~ /ansible_collections/ ]]; then
+ output_path="tests/output"
+else
+ output_path="test/results"
+fi
+
+echo "##vso[task.setVariable variable=outputPath]${output_path}"
+
+if compgen -G "${output_path}"'/junit/*.xml' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveTestResults]true"
+fi
+
+if compgen -G "${output_path}"'/bot/ansible-test-*' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveBotResults]true"
+fi
+
+if compgen -G "${output_path}"'/coverage/*' > /dev/null; then
+ echo "##vso[task.setVariable variable=haveCoverageData]true"
+fi
diff --git a/ansible_collections/community/docker/.azure-pipelines/scripts/publish-codecov.py b/ansible_collections/community/docker/.azure-pipelines/scripts/publish-codecov.py
new file mode 100755
index 00000000..58e32f6d
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/scripts/publish-codecov.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""
+Upload code coverage reports to codecov.io.
+Multiple coverage files from multiple languages are accepted and aggregated after upload.
+Python coverage, as well as PowerShell and Python stubs, can all be uploaded.
+"""
+
+import argparse
+import dataclasses
+import pathlib
+import shutil
+import subprocess
+import tempfile
+import typing as t
+import urllib.request
+
+
+@dataclasses.dataclass(frozen=True)
+class CoverageFile:
+ name: str
+ path: pathlib.Path
+ flags: t.List[str]
+
+
+@dataclasses.dataclass(frozen=True)
+class Args:
+ dry_run: bool
+ path: pathlib.Path
+
+
+def parse_args() -> Args:
+ parser = argparse.ArgumentParser()
+ parser.add_argument('-n', '--dry-run', action='store_true')
+ parser.add_argument('path', type=pathlib.Path)
+
+ args = parser.parse_args()
+
+ # Store arguments in a typed dataclass
+ fields = dataclasses.fields(Args)
+ kwargs = {field.name: getattr(args, field.name) for field in fields}
+
+ return Args(**kwargs)
+
+
+def process_files(directory: pathlib.Path) -> t.Tuple[CoverageFile, ...]:
+ processed = []
+ for file in directory.joinpath('reports').glob('coverage*.xml'):
+ name = file.stem.replace('coverage=', '')
+
+ # Get flags from name
+ flags = name.replace('-powershell', '').split('=') # Drop '-powershell' suffix
+ flags = [flag if not flag.startswith('stub') else flag.split('-')[0] for flag in flags] # Remove "-01" from stub files
+
+ processed.append(CoverageFile(name, file, flags))
+
+ return tuple(processed)
+
+
+def upload_files(codecov_bin: pathlib.Path, files: t.Tuple[CoverageFile, ...], dry_run: bool = False) -> None:
+ for file in files:
+ cmd = [
+ str(codecov_bin),
+ '--name', file.name,
+ '--file', str(file.path),
+ ]
+ for flag in file.flags:
+ cmd.extend(['--flags', flag])
+
+ if dry_run:
+ print(f'DRY-RUN: Would run command: {cmd}')
+ continue
+
+ subprocess.run(cmd, check=True)
+
+
+def download_file(url: str, dest: pathlib.Path, flags: int, dry_run: bool = False) -> None:
+ if dry_run:
+ print(f'DRY-RUN: Would download {url} to {dest} and set mode to {flags:o}')
+ return
+
+ with urllib.request.urlopen(url) as resp:
+ with dest.open('w+b') as f:
+ # Read data in chunks rather than all at once
+ shutil.copyfileobj(resp, f, 64 * 1024)
+
+ dest.chmod(flags)
+
+
+def main():
+ args = parse_args()
+ url = 'https://ansible-ci-files.s3.amazonaws.com/codecov/linux/codecov'
+ with tempfile.TemporaryDirectory(prefix='codecov-') as tmpdir:
+ codecov_bin = pathlib.Path(tmpdir) / 'codecov'
+ download_file(url, codecov_bin, 0o755, args.dry_run)
+
+ files = process_files(args.path)
+ upload_files(codecov_bin, files, args.dry_run)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/.azure-pipelines/scripts/report-coverage.sh b/ansible_collections/community/docker/.azure-pipelines/scripts/report-coverage.sh
new file mode 100755
index 00000000..e8d82c74
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/scripts/report-coverage.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+# Generate code coverage reports for uploading to Azure Pipelines and codecov.io.
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eu
+
+PATH="${PWD}/bin:${PATH}"
+
+if [[ "$(ansible --version)" =~ \ 2\.9\. ]]; then
+ exit
+fi
+
+if ! ansible-test --help >/dev/null 2>&1; then
+ # Install the devel version of ansible-test for generating code coverage reports.
+ # This is only used by Ansible Collections, which are typically tested against multiple Ansible versions (in separate jobs).
+    # Since a version of ansible-test is required that can work with the output from multiple older releases, the devel version is used.
+ pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
+fi
+
+ansible-test coverage xml --group-by command --stub --venv --venv-system-site-packages --color -v
diff --git a/ansible_collections/community/docker/.azure-pipelines/scripts/run-tests.sh b/ansible_collections/community/docker/.azure-pipelines/scripts/run-tests.sh
new file mode 100755
index 00000000..4ed20d55
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/scripts/run-tests.sh
@@ -0,0 +1,38 @@
+#!/usr/bin/env bash
+# Configure the test environment and run the tests.
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eu
+
+entry_point="$1"
+test="$2"
+read -r -a coverage_branches <<< "$3" # space separated list of branches to run code coverage on for scheduled builds
+
+export COMMIT_MESSAGE
+export COMPLETE
+export COVERAGE
+export IS_PULL_REQUEST
+
+if [ "${SYSTEM_PULLREQUEST_TARGETBRANCH:-}" ]; then
+ IS_PULL_REQUEST=true
+ COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD^2)
+else
+ IS_PULL_REQUEST=
+ COMMIT_MESSAGE=$(git log --format=%B -n 1 HEAD)
+fi
+
+COMPLETE=
+COVERAGE=
+
+if [ "${BUILD_REASON}" = "Schedule" ]; then
+ COMPLETE=yes
+
+ if printf '%s\n' "${coverage_branches[@]}" | grep -q "^${BUILD_SOURCEBRANCHNAME}$"; then
+ COVERAGE=yes
+ fi
+fi
+
+"${entry_point}" "${test}" 2>&1 | "$(dirname "$0")/time-command.py"
diff --git a/ansible_collections/community/docker/.azure-pipelines/scripts/time-command.py b/ansible_collections/community/docker/.azure-pipelines/scripts/time-command.py
new file mode 100755
index 00000000..85a7c3c1
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/scripts/time-command.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Prepends a relative timestamp to each input line from stdin and writes it to stdout."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+import time
+
+
+def main():
+ """Main program entry point."""
+ start = time.time()
+
+ sys.stdin.reconfigure(errors='surrogateescape')
+ sys.stdout.reconfigure(errors='surrogateescape')
+
+ for line in sys.stdin:
+ seconds = time.time() - start
+ sys.stdout.write('%02d:%02d %s' % (seconds // 60, seconds % 60, line))
+ sys.stdout.flush()
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/.azure-pipelines/templates/coverage.yml b/ansible_collections/community/docker/.azure-pipelines/templates/coverage.yml
new file mode 100644
index 00000000..75084511
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/templates/coverage.yml
@@ -0,0 +1,43 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This template adds a job for processing code coverage data.
+# It will upload results to Azure Pipelines and codecov.io.
+# Use it from a job stage that completes after all other jobs have completed.
+# This can be done by placing it in a separate summary stage that runs after the test stage(s) have completed.
+
+jobs:
+ - job: Coverage
+ displayName: Code Coverage
+ container: default
+ workspace:
+ clean: all
+ steps:
+ - checkout: self
+ fetchDepth: $(fetchDepth)
+ path: $(checkoutPath)
+ - task: DownloadPipelineArtifact@2
+ displayName: Download Coverage Data
+ inputs:
+ path: coverage/
+ patterns: "Coverage */*=coverage.combined"
+ - bash: .azure-pipelines/scripts/combine-coverage.py coverage/
+ displayName: Combine Coverage Data
+ - bash: .azure-pipelines/scripts/report-coverage.sh
+ displayName: Generate Coverage Report
+ condition: gt(variables.coverageFileCount, 0)
+ - task: PublishCodeCoverageResults@1
+ inputs:
+ codeCoverageTool: Cobertura
+ # Azure Pipelines only accepts a single coverage data file.
+ # That means only Python or PowerShell coverage can be uploaded, but not both.
+ # Set the "pipelinesCoverage" variable to determine which type is uploaded.
+ # Use "coverage" for Python and "coverage-powershell" for PowerShell.
+ summaryFileLocation: "$(outputPath)/reports/$(pipelinesCoverage).xml"
+ displayName: Publish to Azure Pipelines
+ condition: gt(variables.coverageFileCount, 0)
+ - bash: .azure-pipelines/scripts/publish-codecov.py "$(outputPath)"
+ displayName: Publish to codecov.io
+ condition: gt(variables.coverageFileCount, 0)
+ continueOnError: true
diff --git a/ansible_collections/community/docker/.azure-pipelines/templates/matrix.yml b/ansible_collections/community/docker/.azure-pipelines/templates/matrix.yml
new file mode 100644
index 00000000..85a869a8
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/templates/matrix.yml
@@ -0,0 +1,59 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This template uses the provided targets and optional groups to generate a matrix which is then passed to the test template.
+# If this matrix template does not provide the required functionality, consider using the test template directly instead.
+
+parameters:
+ # A required list of dictionaries, one per test target.
+ # Each item in the list must contain a "test" or "name" key.
+ # Both may be provided. If one is omitted, the other will be used.
+ - name: targets
+ type: object
+
+ # An optional list of values which will be used to multiply the targets list into a matrix.
+ # Values can be strings or numbers.
+ - name: groups
+ type: object
+ default: []
+
+ # An optional format string used to generate the job name.
+ # - {0} is the name of an item in the targets list.
+ - name: nameFormat
+ type: string
+ default: "{0}"
+
+ # An optional format string used to generate the test name.
+ # - {0} is the name of an item in the targets list.
+ - name: testFormat
+ type: string
+ default: "{0}"
+
+ # An optional format string used to add the group to the job name.
+ # {0} is the formatted name of an item in the targets list.
+ # {{1}} is the group -- be sure to include the double "{{" and "}}".
+ - name: nameGroupFormat
+ type: string
+ default: "{0} - {{1}}"
+
+ # An optional format string used to add the group to the test name.
+ # {0} is the formatted test of an item in the targets list.
+ # {{1}} is the group -- be sure to include the double "{{" and "}}".
+ - name: testGroupFormat
+ type: string
+ default: "{0}/{{1}}"
+
+jobs:
+ - template: test.yml
+ parameters:
+ jobs:
+ - ${{ if eq(length(parameters.groups), 0) }}:
+ - ${{ each target in parameters.targets }}:
+ - name: ${{ format(parameters.nameFormat, coalesce(target.name, target.test)) }}
+ test: ${{ format(parameters.testFormat, coalesce(target.test, target.name)) }}
+ - ${{ if not(eq(length(parameters.groups), 0)) }}:
+ - ${{ each group in parameters.groups }}:
+ - ${{ each target in parameters.targets }}:
+ - name: ${{ format(format(parameters.nameGroupFormat, parameters.nameFormat), coalesce(target.name, target.test), group) }}
+ test: ${{ format(format(parameters.testGroupFormat, parameters.testFormat), coalesce(target.test, target.name), group) }}
diff --git a/ansible_collections/community/docker/.azure-pipelines/templates/test.yml b/ansible_collections/community/docker/.azure-pipelines/templates/test.yml
new file mode 100644
index 00000000..90501327
--- /dev/null
+++ b/ansible_collections/community/docker/.azure-pipelines/templates/test.yml
@@ -0,0 +1,49 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# This template uses the provided list of jobs to create one or more test jobs.
+# It can be used directly if needed, or through the matrix template.
+
+parameters:
+ # A required list of dictionaries, one per test job.
+ # Each item in the list must contain a "job" and "name" key.
+ - name: jobs
+ type: object
+
+jobs:
+ - ${{ each job in parameters.jobs }}:
+ - job: test_${{ replace(replace(replace(job.test, '/', '_'), '.', '_'), '-', '_') }}
+ displayName: ${{ job.name }}
+ container: default
+ workspace:
+ clean: all
+ steps:
+ - checkout: self
+ fetchDepth: $(fetchDepth)
+ path: $(checkoutPath)
+ - bash: .azure-pipelines/scripts/run-tests.sh "$(entryPoint)" "${{ job.test }}" "$(coverageBranches)"
+ displayName: Run Tests
+ - bash: .azure-pipelines/scripts/process-results.sh
+ condition: succeededOrFailed()
+ displayName: Process Results
+ - bash: .azure-pipelines/scripts/aggregate-coverage.sh "$(Agent.TempDirectory)"
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Aggregate Coverage Data
+ - task: PublishTestResults@2
+ condition: eq(variables.haveTestResults, 'true')
+ inputs:
+ testResultsFiles: "$(outputPath)/junit/*.xml"
+ displayName: Publish Test Results
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveBotResults, 'true')
+ displayName: Publish Bot Results
+ inputs:
+ targetPath: "$(outputPath)/bot/"
+ artifactName: "Bot $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
+ - task: PublishPipelineArtifact@1
+ condition: eq(variables.haveCoverageData, 'true')
+ displayName: Publish Coverage Data
+ inputs:
+ targetPath: "$(Agent.TempDirectory)/coverage/"
+ artifactName: "Coverage $(System.JobAttempt) $(System.StageDisplayName) $(System.JobDisplayName)"
diff --git a/ansible_collections/community/docker/.github/dependabot.yml b/ansible_collections/community/docker/.github/dependabot.yml
new file mode 100644
index 00000000..2f4ff900
--- /dev/null
+++ b/ansible_collections/community/docker/.github/dependabot.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: 2
+updates:
+ - package-ecosystem: "github-actions"
+ directory: "/"
+ schedule:
+ interval: "weekly"
diff --git a/ansible_collections/community/docker/.github/patchback.yml b/ansible_collections/community/docker/.github/patchback.yml
new file mode 100644
index 00000000..5ee7812e
--- /dev/null
+++ b/ansible_collections/community/docker/.github/patchback.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+backport_branch_prefix: patchback/backports/
+backport_label_prefix: backport-
+target_branch_prefix: stable-
+...
diff --git a/ansible_collections/community/docker/.github/workflows/ansible-test.yml b/ansible_collections/community/docker/.github/workflows/ansible-test.yml
new file mode 100644
index 00000000..e2a5e30a
--- /dev/null
+++ b/ansible_collections/community/docker/.github/workflows/ansible-test.yml
@@ -0,0 +1,150 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# For the comprehensive list of the inputs supported by the ansible-community/ansible-test-gh-action GitHub Action, see
+# https://github.com/marketplace/actions/ansible-test
+
+name: EOL CI
+on:
+ # Run EOL CI against all pushes (direct commits, also merged PRs), Pull Requests
+ push:
+ branches:
+ - main
+ - stable-*
+ pull_request:
+ # Run EOL CI once per day (at 09:00 UTC)
+ schedule:
+ - cron: '0 9 * * *'
+
+concurrency:
+ # Make sure there is at most one active run per PR, but do not cancel any non-PR runs
+ group: ${{ github.workflow }}-${{ (github.head_ref && github.event.number) || github.run_id }}
+ cancel-in-progress: true
+
+jobs:
+ sanity:
+ name: EOL Sanity (Ⓐ${{ matrix.ansible }})
+ strategy:
+ matrix:
+ ansible:
+ - '2.11'
+ # Ansible-test on various stable branches does not yet work well with cgroups v2.
+ # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
+ # image for these stable branches. The list of branches where this is necessary will
+ # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
+ # for the latest list.
+ runs-on: >-
+ ${{ contains(fromJson(
+ '["2.9", "2.10", "2.11"]'
+ ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
+ steps:
+ - name: Perform sanity testing
+ uses: felixfontein/ansible-test-gh-action@main
+ with:
+ ansible-core-github-repository-slug: felixfontein/ansible
+ ansible-core-version: stable-${{ matrix.ansible }}
+ coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
+ pull-request-change-detection: 'true'
+ testing-type: sanity
+
+ units:
+ # Ansible-test on various stable branches does not yet work well with cgroups v2.
+ # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
+ # image for these stable branches. The list of branches where this is necessary will
+ # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
+ # for the latest list.
+ runs-on: >-
+ ${{ contains(fromJson(
+ '["2.9", "2.10", "2.11"]'
+ ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
+ name: EOL Units (Ⓐ${{ matrix.ansible }})
+ strategy:
+ # As soon as the first unit test fails, cancel the others to free up the CI queue
+ fail-fast: true
+ matrix:
+ ansible:
+ - '2.11'
+
+ steps:
+ - name: >-
+ Perform unit testing against
+ Ansible version ${{ matrix.ansible }}
+ uses: felixfontein/ansible-test-gh-action@main
+ with:
+ ansible-core-github-repository-slug: felixfontein/ansible
+ ansible-core-version: stable-${{ matrix.ansible }}
+ coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
+ pull-request-change-detection: 'true'
+ testing-type: units
+
+ integration:
+ # Ansible-test on various stable branches does not yet work well with cgroups v2.
+ # Since ubuntu-latest now uses Ubuntu 22.04, we need to fall back to the ubuntu-20.04
+ # image for these stable branches. The list of branches where this is necessary will
+ # shrink over time, check out https://github.com/ansible-collections/news-for-maintainers/issues/28
+ # for the latest list.
+ runs-on: >-
+ ${{ contains(fromJson(
+ '["2.9", "2.10", "2.11"]'
+ ), matrix.ansible) && 'ubuntu-20.04' || 'ubuntu-latest' }}
+ name: EOL I (Ⓐ${{ matrix.ansible }}+${{ matrix.docker }}+py${{ matrix.python }}:${{ matrix.target }})
+ strategy:
+ fail-fast: false
+ matrix:
+ ansible:
+ - ''
+ docker:
+ - ''
+ python:
+ - ''
+ target:
+ - ''
+ exclude:
+ - ansible: ''
+ include:
+ # 2.11
+ - ansible: '2.11'
+ docker: fedora32
+ python: ''
+ target: azp/4/
+ - ansible: '2.11'
+ docker: fedora32
+ python: ''
+ target: azp/5/
+ - ansible: '2.11'
+ docker: alpine3
+ python: ''
+ target: azp/4/
+ - ansible: '2.11'
+ docker: alpine3
+ python: ''
+ target: azp/5/
+
+ steps:
+ - name: >-
+ Perform integration testing against
+ Ansible version ${{ matrix.ansible }}
+ under Python ${{ matrix.python }}
+ uses: felixfontein/ansible-test-gh-action@main
+ with:
+ ansible-core-github-repository-slug: felixfontein/ansible
+ ansible-core-version: stable-${{ matrix.ansible }}
+ coverage: ${{ github.event_name == 'schedule' && 'always' || 'never' }}
+ docker-image: ${{ matrix.docker }}
+ integration-continue-on-error: 'false'
+ integration-diff: 'false'
+ integration-retry-on-error: 'true'
+ pre-test-cmd: >-
+ mkdir -p ../../ansible
+ ;
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git ../../ansible/posix
+ ;
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git ../../community/crypto
+ ;
+ git clone --depth=1 --single-branch https://github.com/ansible-collections/community.general.git ../../community/general
+ pull-request-change-detection: 'true'
+ target: ${{ matrix.target }}
+ target-python-version: ${{ matrix.python }}
+ testing-type: integration
diff --git a/ansible_collections/community/docker/.github/workflows/docs-pr.yml b/ansible_collections/community/docker/.github/workflows/docs-pr.yml
new file mode 100644
index 00000000..c63bdbfc
--- /dev/null
+++ b/ansible_collections/community/docker/.github/workflows/docs-pr.yml
@@ -0,0 +1,92 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+name: Collection Docs
+concurrency:
+ group: docs-pr-${{ github.head_ref }}
+ cancel-in-progress: true
+on:
+ pull_request_target:
+ types: [opened, synchronize, reopened, closed]
+
+env:
+ GHP_BASE_URL: https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}
+
+jobs:
+ build-docs:
+ permissions:
+ contents: read
+ name: Build Ansible Docs
+ uses: felixfontein/github-docs-build/.github/workflows/_shared-docs-build-pr.yml@base-ref
+ with:
+ collection-name: community.docker
+ init-lenient: false
+ init-fail-on-error: true
+ squash-hierarchy: true
+ init-project: Community.Docker Collection
+ init-copyright: Community.Docker Contributors
+ init-title: Community.Docker Collection Documentation
+ init-html-short-title: Community.Docker Collection Docs
+ init-extra-html-theme-options: |
+ documentation_home_url=https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/branch/main/
+ render-file-line: '> * `$<status>` [$<path_tail>](https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/pr/${{ github.event.number }}/$<path_tail>)'
+
+ publish-docs-gh-pages:
+ # for now we won't run this on forks
+ if: github.repository == 'ansible-collections/community.docker'
+ permissions:
+ contents: write
+ needs: [build-docs]
+ name: Publish Ansible Docs
+ uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-publish-gh-pages.yml@main
+ with:
+ artifact-name: ${{ needs.build-docs.outputs.artifact-name }}
+ action: ${{ (github.event.action == 'closed' || needs.build-docs.outputs.changed != 'true') && 'teardown' || 'publish' }}
+ secrets:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+
+ comment:
+ permissions:
+ pull-requests: write
+ runs-on: ubuntu-latest
+ needs: [build-docs, publish-docs-gh-pages]
+ name: PR comments
+ steps:
+ - name: PR comment
+ uses: ansible-community/github-docs-build/actions/ansible-docs-build-comment@main
+ with:
+ body-includes: '## Docs Build'
+ reactions: heart
+ action: ${{ needs.build-docs.outputs.changed != 'true' && 'remove' || '' }}
+ on-closed-body: |
+ ## Docs Build 📝
+
+ This PR is closed and any previously published docsite has been unpublished.
+ on-merged-body: |
+ ## Docs Build 📝
+
+      Thank you for your contribution! ✨
+
+ This PR has been merged and the docs are now incorporated into `main`:
+ ${{ env.GHP_BASE_URL }}/branch/main
+ body: |
+ ## Docs Build 📝
+
+      Thank you for your contribution! ✨
+
+ The docs for **this PR** have been published here:
+ ${{ env.GHP_BASE_URL }}/pr/${{ github.event.number }}
+
+ You can compare to the docs for the `main` branch here:
+ ${{ env.GHP_BASE_URL }}/branch/main
+
+ The docsite for **this PR** is also available for download as an artifact from this run:
+ ${{ needs.build-docs.outputs.artifact-url }}
+
+ File changes:
+
+ ${{ needs.build-docs.outputs.diff-files-rendered }}
+
+ ${{ needs.build-docs.outputs.diff-rendered }}
diff --git a/ansible_collections/community/docker/.github/workflows/docs-push.yml b/ansible_collections/community/docker/.github/workflows/docs-push.yml
new file mode 100644
index 00000000..ccc32085
--- /dev/null
+++ b/ansible_collections/community/docker/.github/workflows/docs-push.yml
@@ -0,0 +1,52 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+name: Collection Docs
+concurrency:
+ group: docs-push-${{ github.sha }}
+ cancel-in-progress: true
+on:
+ push:
+ branches:
+ - main
+ - stable-*
+ tags:
+ - '*'
+ # Run CI once per day (at 09:00 UTC)
+ schedule:
+ - cron: '0 9 * * *'
+ # Allow manual trigger (for newer antsibull-docs, sphinx-ansible-theme, ... versions)
+ workflow_dispatch:
+
+jobs:
+ build-docs:
+ permissions:
+ contents: read
+ name: Build Ansible Docs
+ uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-push.yml@main
+ with:
+ collection-name: community.docker
+ init-lenient: false
+ init-fail-on-error: true
+ squash-hierarchy: true
+ init-project: Community.Docker Collection
+ init-copyright: Community.Docker Contributors
+ init-title: Community.Docker Collection Documentation
+ init-html-short-title: Community.Docker Collection Docs
+ init-extra-html-theme-options: |
+ documentation_home_url=https://${{ github.repository_owner }}.github.io/${{ github.event.repository.name }}/branch/main/
+
+ publish-docs-gh-pages:
+ # for now we won't run this on forks
+ if: github.repository == 'ansible-collections/community.docker'
+ permissions:
+ contents: write
+ needs: [build-docs]
+ name: Publish Ansible Docs
+ uses: ansible-community/github-docs-build/.github/workflows/_shared-docs-build-publish-gh-pages.yml@main
+ with:
+ artifact-name: ${{ needs.build-docs.outputs.artifact-name }}
+ secrets:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
diff --git a/ansible_collections/community/docker/.github/workflows/ee.yml b/ansible_collections/community/docker/.github/workflows/ee.yml
new file mode 100644
index 00000000..9a1d74c8
--- /dev/null
+++ b/ansible_collections/community/docker/.github/workflows/ee.yml
@@ -0,0 +1,118 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+name: execution environment
+on:
+ # Run CI against all pushes (direct commits, also merged PRs), Pull Requests
+ push:
+ branches:
+ - main
+ - stable-*
+ pull_request:
+ # Run CI once per day (at 04:30 UTC)
+ # This ensures that even if there haven't been commits that we are still testing against latest version of ansible-builder
+ schedule:
+ - cron: '30 4 * * *'
+
+env:
+ NAMESPACE: community
+ COLLECTION_NAME: docker
+
+jobs:
+ build:
+ name: Build and test EE (Ⓐ${{ matrix.runner_tag }})
+ strategy:
+ matrix:
+ runner_tag:
+ - devel
+ - stable-2.12-latest
+ - stable-2.11-latest
+ - stable-2.9-latest
+ runs-on: ubuntu-latest
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v3
+ with:
+ path: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}
+
+ - name: Set up Python
+ uses: actions/setup-python@v4
+ with:
+ python-version: '3.10'
+
+ - name: Install ansible-builder and ansible-navigator
+ run: pip install ansible-builder ansible-navigator
+
+ - name: Verify requirements
+ run: ansible-builder introspect --sanitize .
+
+ - name: Make sure galaxy.yml has version entry
+ run: >-
+ python -c
+ 'import yaml ;
+ f = open("galaxy.yml", "rb") ;
+ data = yaml.safe_load(f) ;
+ f.close() ;
+ data["version"] = data.get("version") or "0.0.1" ;
+ f = open("galaxy.yml", "wb") ;
+ f.write(yaml.dump(data).encode("utf-8")) ;
+ f.close() ;
+ '
+ working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}
+
+ - name: Build collection
+ run: |
+ ansible-galaxy collection build --output-path ../../../
+ working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}
+
+ - name: Create files for building execution environment
+ run: |
+ COLLECTION_FILENAME="$(ls "${{ env.NAMESPACE }}-${{ env.COLLECTION_NAME }}"-*.tar.gz)"
+
+ # EE config
+ cat > execution-environment.yml <<EOF
+ ---
+ version: 1
+ build_arg_defaults:
+ EE_BASE_IMAGE: 'quay.io/ansible/ansible-runner:${{ matrix.runner_tag }}'
+ dependencies:
+ galaxy: requirements.yml
+ EOF
+ echo "::group::execution-environment.yml"
+ cat execution-environment.yml
+ echo "::endgroup::"
+
+ # Requirements
+ cat > requirements.yml <<EOF
+ ---
+ collections:
+ - name: ${COLLECTION_FILENAME}
+ type: file
+ EOF
+ echo "::group::requirements.yml"
+ cat requirements.yml
+ echo "::endgroup::"
+
+ - name: Build image based on ${{ matrix.runner_tag }}
+ run: |
+ mkdir -p context/_build/
+ cp "${{ env.NAMESPACE }}-${{ env.COLLECTION_NAME }}"-*.tar.gz context/_build/
+ ansible-builder build -v 3 -t test-ee:latest --container-runtime=docker
+
+ - name: Make /var/run/docker.sock accessible by everyone
+ run: sudo chmod a+rw /var/run/docker.sock
+
+ - name: Run basic tests
+ run: >
+ ansible-navigator run
+ --mode stdout
+ --pull-policy never
+ --set-environment-variable ANSIBLE_PRIVATE_ROLE_VARS=true
+ --container-engine docker
+ --container-options=-v --container-options=/var/run/docker.sock:/var/run/docker.sock
+ --execution-environment-image test-ee:latest
+ -v
+ all.yml
+ working-directory: ansible_collections/${{ env.NAMESPACE }}/${{ env.COLLECTION_NAME }}/tests/ee
diff --git a/ansible_collections/community/docker/.github/workflows/reuse.yml b/ansible_collections/community/docker/.github/workflows/reuse.yml
new file mode 100644
index 00000000..8d9ebde8
--- /dev/null
+++ b/ansible_collections/community/docker/.github/workflows/reuse.yml
@@ -0,0 +1,32 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+name: Verify REUSE
+
+on:
+ push:
+ branches: [main]
+ pull_request:
+ branches: [main]
+ # Run CI once per day (at 04:30 UTC)
+ schedule:
+ - cron: '30 4 * * *'
+
+jobs:
+ check:
+ permissions:
+ contents: read
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v3
+
+ - name: Install dependencies
+ run: |
+ pip install reuse
+
+ - name: Check REUSE compliance
+ run: |
+ reuse lint
diff --git a/ansible_collections/community/docker/.reuse/dep5 b/ansible_collections/community/docker/.reuse/dep5
new file mode 100644
index 00000000..0c3745eb
--- /dev/null
+++ b/ansible_collections/community/docker/.reuse/dep5
@@ -0,0 +1,5 @@
+Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+
+Files: changelogs/fragments/*
+Copyright: Ansible Project
+License: GPL-3.0-or-later
diff --git a/ansible_collections/community/docker/CHANGELOG.rst b/ansible_collections/community/docker/CHANGELOG.rst
new file mode 100644
index 00000000..01047294
--- /dev/null
+++ b/ansible_collections/community/docker/CHANGELOG.rst
@@ -0,0 +1,895 @@
+=========================================
+Docker Community Collection Release Notes
+=========================================
+
+.. contents:: Topics
+
+
+v3.4.2
+======
+
+Release Summary
+---------------
+
+Bugfix release.
+
+Bugfixes
+--------
+
+- docker_prune - return correct value for ``changed``. So far the module always claimed that nothing changed (https://github.com/ansible-collections/community.docker/pull/593).
+
+v3.4.1
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Bugfixes
+--------
+
+- docker_api connection plugin, docker_container_exec, docker_container_copy_into - properly close socket to Daemon after executing commands in containers (https://github.com/ansible-collections/community.docker/pull/582).
+- docker_container - fix ``tmpfs_size`` and ``tmpfs_mode`` not being set (https://github.com/ansible-collections/community.docker/pull/580).
+- various plugins and modules - remove unnecessary imports (https://github.com/ansible-collections/community.docker/pull/574).
+
+v3.4.0
+======
+
+Release Summary
+---------------
+
+Regular bugfix and feature release.
+
+Minor Changes
+-------------
+
+- docker_api connection plugin - when copying files to/from a container, stream the file contents instead of first reading them to memory (https://github.com/ansible-collections/community.docker/pull/545).
+- docker_host_info - allow to list all containers with new option ``containers_all`` (https://github.com/ansible-collections/community.docker/issues/535, https://github.com/ansible-collections/community.docker/pull/538).
+
+Bugfixes
+--------
+
+- docker_api connection plugin - fix error handling when 409 Conflict is returned by the Docker daemon in case of a stopped container (https://github.com/ansible-collections/community.docker/pull/546).
+- docker_container_exec - fix error handling when 409 Conflict is returned by the Docker daemon in case of a stopped container (https://github.com/ansible-collections/community.docker/pull/546).
+- docker_plugin - do not crash if plugin is installed in check mode (https://github.com/ansible-collections/community.docker/issues/552, https://github.com/ansible-collections/community.docker/pull/553).
+- most modules - fix handling of ``DOCKER_TIMEOUT`` environment variable, and improve handling of other fallback environment variables (https://github.com/ansible-collections/community.docker/issues/551, https://github.com/ansible-collections/community.docker/pull/554).
+
+New Modules
+-----------
+
+- docker_container_copy_into - Copy a file into a Docker container
+
+v3.3.2
+======
+
+Release Summary
+---------------
+
+Bugfix release.
+
+Bugfixes
+--------
+
+- docker_container - when ``detach=false``, wait indefinitely and not at most one minute. This was the behavior with Docker SDK for Python, and was accidentally changed in 3.0.0 (https://github.com/ansible-collections/community.docker/issues/526, https://github.com/ansible-collections/community.docker/pull/527).
+
+v3.3.1
+======
+
+Release Summary
+---------------
+
+Bugfix release.
+
+Bugfixes
+--------
+
+- current_container_facts - make container detection work better in more cases (https://github.com/ansible-collections/community.docker/pull/522).
+
+v3.3.0
+======
+
+Release Summary
+---------------
+
+Feature and bugfix release.
+
+Minor Changes
+-------------
+
+- current_container_facts - make work with current Docker version, also support Podman (https://github.com/ansible-collections/community.docker/pull/510).
+- docker_image - when using ``archive_path``, detect whether changes are necessary based on the image ID (hash). If the existing tar archive matches the source, do nothing. Previously, each task execution re-created the archive (https://github.com/ansible-collections/community.docker/pull/500).
+
+Bugfixes
+--------
+
+- docker_container_exec - fix ``chdir`` option which was ignored since community.docker 3.0.0 (https://github.com/ansible-collections/community.docker/issues/517, https://github.com/ansible-collections/community.docker/pull/518).
+- vendored latest Docker SDK for Python bugfix (https://github.com/ansible-collections/community.docker/pull/513, https://github.com/docker/docker-py/issues/3045).
+
+v3.2.2
+======
+
+Release Summary
+---------------
+
+Bugfix release.
+
+Bugfixes
+--------
+
+- docker_container - the ``kill_signal`` option erroneously did not accept strings anymore since 3.0.0 (https://github.com/ansible-collections/community.docker/issues/505, https://github.com/ansible-collections/community.docker/pull/506).
+
+v3.2.1
+======
+
+Release Summary
+---------------
+
+Maintenance release with improved documentation.
+
+v3.2.0
+======
+
+Release Summary
+---------------
+
+Feature and deprecation release.
+
+Minor Changes
+-------------
+
+- docker_container - added ``image_name_mismatch`` option which allows to control the behavior if the container uses the image specified, but the container's configuration uses a different name for the image than the one provided to the module (https://github.com/ansible-collections/community.docker/issues/485, https://github.com/ansible-collections/community.docker/pull/488).
+
+Deprecated Features
+-------------------
+
+- docker_container - the ``ignore_image`` option is deprecated and will be removed in community.docker 4.0.0. Use ``image: ignore`` in ``comparisons`` instead (https://github.com/ansible-collections/community.docker/pull/487).
+- docker_container - the ``purge_networks`` option is deprecated and will be removed in community.docker 4.0.0. Use ``networks: strict`` in ``comparisons`` instead, and make sure to provide ``networks``, with value ``[]`` if all networks should be removed (https://github.com/ansible-collections/community.docker/pull/487).
+
+v3.1.0
+======
+
+Release Summary
+---------------
+
+Feature release.
+
+Minor Changes
+-------------
+
+- The collection repository conforms to the `REUSE specification <https://reuse.software/spec/>`__ except for the changelog fragments (https://github.com/ansible-collections/community.docker/pull/462).
+- docker_swarm - allows usage of the ``data_path_port`` parameter when initializing a swarm (https://github.com/ansible-collections/community.docker/issues/296).
+
+v3.0.2
+======
+
+Release Summary
+---------------
+
+Bugfix release.
+
+Bugfixes
+--------
+
+- docker_image - fix build argument handling (https://github.com/ansible-collections/community.docker/issues/455, https://github.com/ansible-collections/community.docker/pull/456).
+
+v3.0.1
+======
+
+Release Summary
+---------------
+
+Bugfix release.
+
+Bugfixes
+--------
+
+- docker_container - fix handling of ``env_file`` (https://github.com/ansible-collections/community.docker/issues/451, https://github.com/ansible-collections/community.docker/pull/452).
+
+v3.0.0
+======
+
+Release Summary
+---------------
+
+The 3.0.0 release features a rewrite of the ``docker_container`` module, and many modules and plugins no longer depend on the Docker SDK for Python.
+
+Major Changes
+-------------
+
+- The collection now contains vendored code from the Docker SDK for Python to talk to the Docker daemon. Modules and plugins using this code no longer need the Docker SDK for Python installed on the machine the module or plugin is running on (https://github.com/ansible-collections/community.docker/pull/398).
+- docker_api connection plugin - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/414).
+- docker_container - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/422).
+- docker_container - the module was completely rewritten from scratch (https://github.com/ansible-collections/community.docker/pull/422).
+- docker_container_exec - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/401).
+- docker_container_info - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/402).
+- docker_containers inventory plugin - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/413).
+- docker_host_info - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/403).
+- docker_image - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/404).
+- docker_image_info - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/405).
+- docker_image_load - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/406).
+- docker_login - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/407).
+- docker_network - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/408).
+- docker_network_info - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/409).
+- docker_plugin - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/429).
+- docker_prune - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/410).
+- docker_volume - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/411).
+- docker_volume_info - no longer uses the Docker SDK for Python. It requires ``requests`` to be installed, and depending on the features used has some more requirements. If the Docker SDK for Python is installed, these requirements are likely met (https://github.com/ansible-collections/community.docker/pull/412).
+
+Minor Changes
+-------------
+
+- All software licenses are now in the ``LICENSES/`` directory of the collection root. Moreover, ``SPDX-License-Identifier:`` is used to declare the applicable license for every file that is not automatically generated (https://github.com/ansible-collections/community.docker/pull/430).
+- Remove vendored copy of ``distutils.version`` in favor of vendored copy included with ansible-core 2.12+. For ansible-core 2.11, uses ``distutils.version`` for Python < 3.12. There is no support for ansible-core 2.11 with Python 3.12+ (https://github.com/ansible-collections/community.docker/pull/271).
+- docker_container - add a new parameter ``image_comparison`` to control the behavior for which image will be used for idempotency checks (https://github.com/ansible-collections/community.docker/issues/421, https://github.com/ansible-collections/community.docker/pull/428).
+- docker_container - add support for ``cgroupns_mode`` (https://github.com/ansible-collections/community.docker/issues/338, https://github.com/ansible-collections/community.docker/pull/427).
+- docker_container - allow to specify ``platform`` (https://github.com/ansible-collections/community.docker/issues/123, https://github.com/ansible-collections/community.docker/pull/426).
+- modules and plugins communicating directly with the Docker daemon - improve default TLS version selection for Python 3.6 and newer. This is only a change relative to older community.docker 3.0.0 pre-releases or with respect to Docker SDK for Python < 6.0.0. Docker SDK for Python 6.0.0 will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+- modules and plugins communicating directly with the Docker daemon - simplify use of helper function that was removed in Docker SDK for Python to find executables (https://github.com/ansible-collections/community.docker/pull/438).
+- socket_handler and socket_helper module utils - improve Python forward compatibility, create helper functions for file blocking/unblocking (https://github.com/ansible-collections/community.docker/pull/415).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- This collection does not work with ansible-core 2.11 on Python 3.12+. Please either upgrade to ansible-core 2.12+, or use Python 3.11 or earlier (https://github.com/ansible-collections/community.docker/pull/271).
+- docker_container - ``exposed_ports`` is no longer ignored in ``comparisons``. Before, its value was assumed to be identical with the value of ``published_ports`` (https://github.com/ansible-collections/community.docker/pull/422).
+- docker_container - ``log_options`` can no longer be specified when ``log_driver`` is not specified (https://github.com/ansible-collections/community.docker/pull/422).
+- docker_container - ``publish_all_ports`` is no longer ignored in ``comparisons`` (https://github.com/ansible-collections/community.docker/pull/422).
+- docker_container - ``restart_retries`` can no longer be specified when ``restart_policy`` is not specified (https://github.com/ansible-collections/community.docker/pull/422).
+- docker_container - ``stop_timeout`` is no longer ignored for idempotency if told to be not ignored in ``comparisons``. So far it defaulted to ``ignore`` there, and setting it to ``strict`` had no effect (https://github.com/ansible-collections/community.docker/pull/422).
+- modules and plugins communicating directly with the Docker daemon - when connecting by SSH and not using ``use_ssh_client=true``, reject unknown host keys instead of accepting them. This is only a breaking change relative to older community.docker 3.0.0 pre-releases or with respect to Docker SDK for Python < 6.0.0. Docker SDK for Python 6.0.0 will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- Execution Environments built with community.docker no longer include docker-compose < 2.0.0. If you need to use it with the ``docker_compose`` module, please install that requirement manually (https://github.com/ansible-collections/community.docker/pull/400).
+- Support for Ansible 2.9 and ansible-base 2.10 has been removed. If you need support for Ansible 2.9 or ansible-base 2.10, please use community.docker 2.x.y (https://github.com/ansible-collections/community.docker/pull/400).
+- Support for Docker API versions 1.20 to 1.24 has been removed. If you need support for these API versions, please use community.docker 2.x.y (https://github.com/ansible-collections/community.docker/pull/400).
+- Support for Python 2.6 has been removed. If you need support for Python 2.6, please use community.docker 2.x.y (https://github.com/ansible-collections/community.docker/pull/400).
+- Various modules - the default of ``tls_hostname`` (``localhost``) has been removed. If you want to continue using ``localhost``, you need to specify it explicitly (https://github.com/ansible-collections/community.docker/pull/363).
+- docker_container - the ``all`` value is no longer allowed in ``published_ports``. Use ``publish_all_ports=true`` instead (https://github.com/ansible-collections/community.docker/pull/399).
+- docker_container - the default of ``command_handling`` was changed from ``compatibility`` to ``correct``. Older versions were warning for every invocation of the module when this would result in a change of behavior (https://github.com/ansible-collections/community.docker/pull/399).
+- docker_stack - the return values ``out`` and ``err`` have been removed. Use ``stdout`` and ``stderr`` instead (https://github.com/ansible-collections/community.docker/pull/363).
+
+Security Fixes
+--------------
+
+- modules and plugins communicating directly with the Docker daemon - when connecting by SSH and not using ``use_ssh_client=true``, reject unknown host keys instead of accepting them. This is only a change relative to older community.docker 3.0.0 pre-releases or with respect to Docker SDK for Python < 6.0.0. Docker SDK for Python 6.0.0 will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+
+Bugfixes
+--------
+
+- docker_image - when composing the build context, trim trailing whitespace from ``.dockerignore`` entries. This is only a change relative to older community.docker 3.0.0 pre-releases or with respect to Docker SDK for Python < 6.0.0. Docker SDK for Python 6.0.0 will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+- docker_plugin - fix crash when handling plugin options (https://github.com/ansible-collections/community.docker/issues/446, https://github.com/ansible-collections/community.docker/pull/447).
+- docker_stack - fix broken string formatting when reporting error in case ``compose`` was containing invalid values (https://github.com/ansible-collections/community.docker/pull/448).
+- modules and plugins communicating directly with the Docker daemon - do not create a subshell for SSH connections when using ``use_ssh_client=true``. This is only a change relative to older community.docker 3.0.0 pre-releases or with respect to Docker SDK for Python < 6.0.0. Docker SDK for Python 6.0.0 will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+- modules and plugins communicating directly with the Docker daemon - fix ``ProxyCommand`` handling for SSH connections when not using ``use_ssh_client=true``. This is only a change relative to older community.docker 3.0.0 pre-releases or with respect to Docker SDK for Python < 6.0.0. Docker SDK for Python 6.0.0 will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+- modules and plugins communicating directly with the Docker daemon - fix parsing of IPv6 addresses with a port in ``docker_host``. This is only a change relative to older community.docker 3.0.0 pre-releases or with respect to Docker SDK for Python < 6.0.0. Docker SDK for Python 6.0.0 will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+- modules and plugins communicating directly with the Docker daemon - prevent crash when TLS is used (https://github.com/ansible-collections/community.docker/pull/432).
+
+v2.7.0
+======
+
+Release Summary
+---------------
+
+Bugfix and deprecation release. The next 2.x.y releases will only be bugfix releases, the next expected minor/major release will be 3.0.0 with some major changes.
+
+Minor Changes
+-------------
+
+- Move common utility functions from the ``common`` module_util to a new module_util called ``util``. This should not have any user-visible effect (https://github.com/ansible-collections/community.docker/pull/390).
+
+Deprecated Features
+-------------------
+
+- Support for Docker API version 1.20 to 1.24 has been deprecated and will be removed in community.docker 3.0.0. The first Docker version supporting API version 1.25 was Docker 1.13, released in January 2017. This affects the modules ``docker_container``, ``docker_container_exec``, ``docker_container_info``, ``docker_compose``, ``docker_login``, ``docker_image``, ``docker_image_info``, ``docker_image_load``, ``docker_host_info``, ``docker_network``, ``docker_network_info``, ``docker_node_info``, ``docker_swarm_info``, ``docker_swarm_service``, ``docker_swarm_service_info``, ``docker_volume_info``, and ``docker_volume``, whose minimally supported API version is between 1.20 and 1.24 (https://github.com/ansible-collections/community.docker/pull/396).
+- Support for Python 2.6 is deprecated and will be removed in the next major release (community.docker 3.0.0). Some modules might still work with Python 2.6, but we will no longer try to ensure compatibility (https://github.com/ansible-collections/community.docker/pull/388).
+
+Bugfixes
+--------
+
+- Docker SDK for Python based modules and plugins - if the API version is specified as an option, use that one to validate API version requirements of module/plugin options instead of the latest API version supported by the Docker daemon. This also avoids one unnecessary API call per module/plugin (https://github.com/ansible-collections/community.docker/pull/389).
+
+v2.6.0
+======
+
+Release Summary
+---------------
+
+Bugfix and feature release.
+
+Minor Changes
+-------------
+
+- docker_container - added ``image_label_mismatch`` parameter (https://github.com/ansible-collections/community.docker/issues/314, https://github.com/ansible-collections/community.docker/pull/370).
+
+Deprecated Features
+-------------------
+
+- Support for Ansible 2.9 and ansible-base 2.10 is deprecated, and will be removed in the next major release (community.docker 3.0.0). Some modules might still work with these versions afterwards, but we will no longer keep compatibility code that was needed to support them (https://github.com/ansible-collections/community.docker/pull/361).
+- The dependency on docker-compose for Execution Environments is deprecated and will be removed in community.docker 3.0.0. The `Python docker-compose library <https://pypi.org/project/docker-compose/>`__ is unmaintained and can cause dependency issues. You can manually still install it in an Execution Environment when needed (https://github.com/ansible-collections/community.docker/pull/373).
+- Various modules - the default of ``tls_hostname`` that was supposed to be removed in community.docker 2.0.0 will now be removed in version 3.0.0 (https://github.com/ansible-collections/community.docker/pull/362).
+- docker_stack - the return values ``out`` and ``err`` that were supposed to be removed in community.docker 2.0.0 will now be removed in version 3.0.0 (https://github.com/ansible-collections/community.docker/pull/362).
+
+Bugfixes
+--------
+
+- docker_container - fail with a meaningful message instead of crashing if a port is specified with more than three colon-separated parts (https://github.com/ansible-collections/community.docker/pull/367, https://github.com/ansible-collections/community.docker/issues/365).
+- docker_container - remove unused code that will cause problems with Python 3.13 (https://github.com/ansible-collections/community.docker/pull/354).
+
+v2.5.1
+======
+
+Release Summary
+---------------
+
+Maintenance release.
+
+Bugfixes
+--------
+
+- Include ``PSF-license.txt`` file for ``plugins/module_utils/_version.py``.
+
+v2.5.0
+======
+
+Release Summary
+---------------
+
+Regular feature release.
+
+Minor Changes
+-------------
+
+- docker_config - add support for ``template_driver`` with one option ``golang`` (https://github.com/ansible-collections/community.docker/issues/332, https://github.com/ansible-collections/community.docker/pull/345).
+- docker_swarm - adds ``data_path_addr`` parameter during swarm initialization or when joining (https://github.com/ansible-collections/community.docker/issues/339).
+
+v2.4.0
+======
+
+Release Summary
+---------------
+
+Regular feature and bugfix release.
+
+Minor Changes
+-------------
+
+- Prepare collection for inclusion in an Execution Environment by declaring its dependencies. The ``docker_stack*`` modules are not supported (https://github.com/ansible-collections/community.docker/pull/336).
+- current_container_facts - add detection for GitHub Actions (https://github.com/ansible-collections/community.docker/pull/336).
+- docker_container - support returning Docker container log output when using Docker's ``local`` logging driver, an optimized local logging driver introduced in Docker 18.09 (https://github.com/ansible-collections/community.docker/pull/337).
+
+Bugfixes
+--------
+
+- docker connection plugin - make sure that ``docker_extra_args`` is used for querying the Docker version. Also ensures that the Docker version is only queried when needed. This is currently the case if a remote user is specified (https://github.com/ansible-collections/community.docker/issues/325, https://github.com/ansible-collections/community.docker/pull/327).
+
+v2.3.0
+======
+
+Release Summary
+---------------
+
+Regular feature and bugfix release.
+
+Minor Changes
+-------------
+
+- docker connection plugin - implement connection reset by clearing internal container user cache (https://github.com/ansible-collections/community.docker/pull/312).
+- docker connection plugin - simplify ``actual_user`` handling code (https://github.com/ansible-collections/community.docker/pull/311).
+- docker connection plugin - the plugin supports new ways to define the timeout. These are the ``ANSIBLE_DOCKER_TIMEOUT`` environment variable, the ``timeout`` setting in the ``docker_connection`` section of ``ansible.cfg``, and the ``ansible_docker_timeout`` variable (https://github.com/ansible-collections/community.docker/pull/297).
+- docker_api connection plugin - implement connection reset by clearing internal container user/group ID cache (https://github.com/ansible-collections/community.docker/pull/312).
+- docker_api connection plugin - the plugin supports new ways to define the timeout. These are the ``ANSIBLE_DOCKER_TIMEOUT`` environment variable, the ``timeout`` setting in the ``docker_connection`` section of ``ansible.cfg``, and the ``ansible_docker_timeout`` variable (https://github.com/ansible-collections/community.docker/pull/308).
+
+Bugfixes
+--------
+
+- docker connection plugin - fix option handling to be compatible with ansible-core 2.13 (https://github.com/ansible-collections/community.docker/pull/297, https://github.com/ansible-collections/community.docker/issues/307).
+- docker_api connection plugin - fix option handling to be compatible with ansible-core 2.13 (https://github.com/ansible-collections/community.docker/pull/308).
+
+v2.2.1
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Bugfixes
+--------
+
+- docker_compose - fix Python 3 type error when extracting warnings or errors from docker-compose's output (https://github.com/ansible-collections/community.docker/pull/305).
+
+v2.2.0
+======
+
+Release Summary
+---------------
+
+Regular feature and bugfix release.
+
+Minor Changes
+-------------
+
+- docker_config - add support for rolling update, set ``rolling_versions`` to ``true`` to enable (https://github.com/ansible-collections/community.docker/pull/295, https://github.com/ansible-collections/community.docker/issues/109).
+- docker_secret - add support for rolling update, set ``rolling_versions`` to ``true`` to enable (https://github.com/ansible-collections/community.docker/pull/293, https://github.com/ansible-collections/community.docker/issues/21).
+- docker_swarm_service - add support for setting capabilities with the ``cap_add`` and ``cap_drop`` parameters. Usage is the same as with the ``capabilities`` and ``cap_drop`` parameters for ``docker_container`` (https://github.com/ansible-collections/community.docker/pull/294).
+
+Bugfixes
+--------
+
+- docker_container, docker_image - adjust image finding code to peculiarities of ``podman-docker``'s API emulation when Docker short names like ``redis`` are used (https://github.com/ansible-collections/community.docker/issues/292).
+
+v2.1.1
+======
+
+Release Summary
+---------------
+
+Emergency release to amend breaking change in previous release.
+
+Bugfixes
+--------
+
+- Fix unintended breaking change caused by `an earlier fix <https://github.com/ansible-collections/community.docker/pull/258>`_ by vendoring the deprecated Python standard library ``distutils.version`` until this collection stops supporting Ansible 2.9 and ansible-base 2.10 (https://github.com/ansible-collections/community.docker/issues/267, https://github.com/ansible-collections/community.docker/pull/269).
+
+v2.1.0
+======
+
+Release Summary
+---------------
+
+Feature and bugfix release.
+
+Minor Changes
+-------------
+
+- docker_container_exec - add ``detach`` parameter (https://github.com/ansible-collections/community.docker/issues/250, https://github.com/ansible-collections/community.docker/pull/255).
+- docker_container_exec - add ``env`` option (https://github.com/ansible-collections/community.docker/issues/248, https://github.com/ansible-collections/community.docker/pull/254).
+
+Bugfixes
+--------
+
+- Various modules and plugins - use vendored version of ``distutils.version`` included in ansible-core 2.12 if available. This avoids breakage when ``distutils`` is removed from the standard library of Python 3.12. Note that ansible-core 2.11, ansible-base 2.10 and Ansible 2.9 are right now not compatible with Python 3.12, hence this fix does not target these ansible-core/-base/2.9 versions (https://github.com/ansible-collections/community.docker/pull/258).
+- docker connection plugin - replace deprecated ``distutils.spawn.find_executable`` with Ansible's ``get_bin_path`` to find the ``docker`` executable (https://github.com/ansible-collections/community.docker/pull/257).
+- docker_container_exec - disallow using the ``chdir`` option for Docker API before 1.35 (https://github.com/ansible-collections/community.docker/pull/253).
+
+v2.0.2
+======
+
+Release Summary
+---------------
+
+Bugfix release.
+
+Bugfixes
+--------
+
+- docker_api connection plugin - avoid passing an unnecessary argument to a Docker SDK for Python call that is only supported by version 3.0.0 or later (https://github.com/ansible-collections/community.docker/pull/243).
+- docker_container_exec - ``chdir`` is only supported since Docker SDK for Python 3.0.0. Make sure that this option can only use when 3.0.0 or later is installed, and prevent passing this parameter on when ``chdir`` is not provided to this module (https://github.com/ansible-collections/community.docker/pull/243, https://github.com/ansible-collections/community.docker/issues/242).
+- nsenter connection plugin - ensure the ``nsenter_pid`` option is retrieved in ``_connect`` instead of ``__init__`` to prevent a crasher due to bad initialization order (https://github.com/ansible-collections/community.docker/pull/249).
+- nsenter connection plugin - replace the use of ``--all-namespaces`` with specific namespaces to support compatibility with Busybox nsenter (used on, for example, Alpine containers) (https://github.com/ansible-collections/community.docker/pull/249).
+
+v2.0.1
+======
+
+Release Summary
+---------------
+
+Maintenance release with some documentation fixes.
+
+v2.0.0
+======
+
+Release Summary
+---------------
+
+New major release with some deprecations removed and a breaking change in the ``docker_compose`` module regarding the ``timeout`` parameter.
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- docker_compose - fixed ``timeout`` defaulting behavior so that ``stop_grace_period``, if defined in the compose file, will be used if ``timeout`` is not specified (https://github.com/ansible-collections/community.docker/pull/163).
+
+Deprecated Features
+-------------------
+
+- docker_container - using the special value ``all`` in ``published_ports`` has been deprecated. Use ``publish_all_ports=true`` instead (https://github.com/ansible-collections/community.docker/pull/210).
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- docker_container - the default value of ``container_default_behavior`` changed to ``no_defaults`` (https://github.com/ansible-collections/community.docker/pull/210).
+- docker_container - the default value of ``network_mode`` is now the name of the first network specified in ``networks`` if such are specified and ``networks_cli_compatible=true`` (https://github.com/ansible-collections/community.docker/pull/210).
+- docker_container - the special value ``all`` can no longer be used in ``published_ports`` next to other values. Please use ``publish_all_ports=true`` instead (https://github.com/ansible-collections/community.docker/pull/210).
+- docker_login - removed the ``email`` option (https://github.com/ansible-collections/community.docker/pull/210).
+
+v1.10.0
+=======
+
+Release Summary
+---------------
+
+Regular feature and bugfix release.
+
+Minor Changes
+-------------
+
+- Add the modules docker_container_exec, docker_image_load and docker_plugin to the ``docker`` module defaults group (https://github.com/ansible-collections/community.docker/pull/209).
+- docker_config - add option ``data_src`` to read configuration data from target (https://github.com/ansible-collections/community.docker/issues/64, https://github.com/ansible-collections/community.docker/pull/203).
+- docker_secret - add option ``data_src`` to read secret data from target (https://github.com/ansible-collections/community.docker/issues/64, https://github.com/ansible-collections/community.docker/pull/203).
+
+v1.9.1
+======
+
+Release Summary
+---------------
+
+Regular bugfix release.
+
+Bugfixes
+--------
+
+- docker_compose - fixed incorrect ``changed`` status for services with ``profiles`` defined, but none enabled (https://github.com/ansible-collections/community.docker/pull/192).
+
+v1.9.0
+======
+
+Release Summary
+---------------
+
+New bugfixes and features release.
+
+Minor Changes
+-------------
+
+- docker_* modules - include ``ImportError`` traceback when reporting that Docker SDK for Python could not be found (https://github.com/ansible-collections/community.docker/pull/188).
+- docker_compose - added ``env_file`` option for specifying custom environment files (https://github.com/ansible-collections/community.docker/pull/174).
+- docker_container - added ``publish_all_ports`` option to publish all exposed ports to random ports except those explicitly bound with ``published_ports`` (this was already added in community.docker 1.8.0) (https://github.com/ansible-collections/community.docker/pull/162).
+- docker_container - added new ``command_handling`` option with current deprecated default value ``compatibility`` which allows to control how the module handles shell quoting when interpreting lists, and how the module handles empty lists/strings. The default will switch to ``correct`` in community.docker 3.0.0 (https://github.com/ansible-collections/community.docker/pull/186).
+- docker_container - lifted restriction preventing the creation of anonymous volumes with the ``mounts`` option (https://github.com/ansible-collections/community.docker/pull/181).
+
+Deprecated Features
+-------------------
+
+- docker_container - the new ``command_handling``'s default value, ``compatibility``, is deprecated and will change to ``correct`` in community.docker 3.0.0. A deprecation warning is emitted by the module in cases where the behavior will change. Please note that ansible-core will output a deprecation warning only once, so if it is shown for an earlier task, there could be more tasks with this warning where it is not shown (https://github.com/ansible-collections/community.docker/pull/186).
+
+Bugfixes
+--------
+
+- docker_compose - fixes task failures when bringing up services while using ``docker-compose <1.17.0`` (https://github.com/ansible-collections/community.docker/issues/180).
+- docker_container - make sure to also return ``container`` on ``detached=false`` when status code is non-zero (https://github.com/ansible-collections/community.docker/pull/178).
+- docker_stack_info - make sure that module isn't skipped in check mode (https://github.com/ansible-collections/community.docker/pull/183).
+- docker_stack_task_info - make sure that module isn't skipped in check mode (https://github.com/ansible-collections/community.docker/pull/183).
+
+New Plugins
+-----------
+
+Connection
+~~~~~~~~~~
+
+- nsenter - execute on host running controller container
+
+v1.8.0
+======
+
+Release Summary
+---------------
+
+Regular bugfix and feature release.
+
+Minor Changes
+-------------
+
+- Avoid internal ansible-core module_utils in favor of equivalent public API available since at least Ansible 2.9 (https://github.com/ansible-collections/community.docker/pull/164).
+- docker_compose - added ``profiles`` option to specify service profiles when starting services (https://github.com/ansible-collections/community.docker/pull/167).
+- docker_containers inventory plugin - when ``connection_type=docker-api``, now pass Docker daemon connection options from inventory plugin to connection plugin. This can be disabled by setting ``configure_docker_daemon=false`` (https://github.com/ansible-collections/community.docker/pull/157).
+- docker_host_info - allow values for keys in ``containers_filters``, ``images_filters``, ``networks_filters``, and ``volumes_filters`` to be passed as YAML lists (https://github.com/ansible-collections/community.docker/pull/160).
+- docker_plugin - added ``alias`` option to specify local names for docker plugins (https://github.com/ansible-collections/community.docker/pull/161).
+
+Bugfixes
+--------
+
+- docker_compose - fix idempotence bug when using ``stopped: true`` (https://github.com/ansible-collections/community.docker/issues/142, https://github.com/ansible-collections/community.docker/pull/159).
+
+v1.7.0
+======
+
+Release Summary
+---------------
+
+Small feature and bugfix release.
+
+Minor Changes
+-------------
+
+- docker_image - allow to tag images by ID (https://github.com/ansible-collections/community.docker/pull/149).
+
+v1.6.1
+======
+
+Release Summary
+---------------
+
+Bugfix release to reduce deprecation warning spam.
+
+Bugfixes
+--------
+
+- docker_* modules and plugins, except ``docker_swarm`` connection plugin and ``docker_compose`` and ``docker_stack*`` modules - only emit ``tls_hostname`` deprecation message if TLS is actually used (https://github.com/ansible-collections/community.docker/pull/143).
+
+v1.6.0
+======
+
+Release Summary
+---------------
+
+Regular bugfix and feature release.
+
+Minor Changes
+-------------
+
+- common module utils - correct error messages for guiding to install proper Docker SDK for Python module (https://github.com/ansible-collections/community.docker/pull/125).
+- docker_container - allow ``memory_swap: -1`` to set memory swap limit to unlimited. This is useful when the user cannot set memory swap limits due to cgroup limitations or other reasons, as by default Docker will try to set swap usage to two times the value of ``memory`` (https://github.com/ansible-collections/community.docker/pull/138).
+
+Deprecated Features
+-------------------
+
+- docker_* modules and plugins, except ``docker_swarm`` connection plugin and ``docker_compose`` and ``docker_stack*`` modules - the current default ``localhost`` for ``tls_hostname`` is deprecated. In community.docker 2.0.0 it will be computed from ``docker_host`` instead (https://github.com/ansible-collections/community.docker/pull/134).
+
+Bugfixes
+--------
+
+- docker-compose - fix not pulling when ``state: present`` and ``stopped: true`` (https://github.com/ansible-collections/community.docker/issues/12, https://github.com/ansible-collections/community.docker/pull/119).
+- docker_plugin - also configure plugin after installing (https://github.com/ansible-collections/community.docker/issues/118, https://github.com/ansible-collections/community.docker/pull/135).
+- docker_swarm_services - avoid crash during idempotence check if ``published_port`` is not specified (https://github.com/ansible-collections/community.docker/issues/107, https://github.com/ansible-collections/community.docker/pull/136).
+
+v1.5.0
+======
+
+Release Summary
+---------------
+
+Regular feature release.
+
+Minor Changes
+-------------
+
+- Add the ``use_ssh_client`` option to most docker modules and plugins (https://github.com/ansible-collections/community.docker/issues/108, https://github.com/ansible-collections/community.docker/pull/114).
+
+Bugfixes
+--------
+
+- all modules - use ``to_native`` to convert exceptions to strings (https://github.com/ansible-collections/community.docker/pull/121).
+
+New Modules
+-----------
+
+- docker_container_exec - Execute command in a docker container
+
+v1.4.0
+======
+
+Release Summary
+---------------
+
+Security release to address another potential secret leak. Also includes regular bugfixes and features.
+
+Minor Changes
+-------------
+
+- docker_swarm_service - change ``publish.published_port`` option from mandatory to optional. Docker will assign random high port if not specified (https://github.com/ansible-collections/community.docker/issues/99).
+
+Breaking Changes / Porting Guide
+--------------------------------
+
+- docker_swarm - if ``join_token`` is specified, a returned join token with the same value will be replaced by ``VALUE_SPECIFIED_IN_NO_LOG_PARAMETER``. Make sure that you do not blindly use the join tokens from the return value of this module when the module is invoked with ``join_token`` specified! This breaking change appears in a minor release since it is necessary to fix a security issue (https://github.com/ansible-collections/community.docker/pull/103).
+
+Security Fixes
+--------------
+
+- docker_swarm - the ``join_token`` option is now marked as ``no_log`` so it is no longer written into logs (https://github.com/ansible-collections/community.docker/pull/103).
+
+Bugfixes
+--------
+
+- ``docker_swarm_service`` - fix KeyError caused by reference to deprecated option ``update_failure_action`` (https://github.com/ansible-collections/community.docker/pull/100).
+- docker_swarm_service - mark ``secrets`` module option with ``no_log=False`` since it does not leak secrets (https://github.com/ansible-collections/community.general/pull/2001).
+
+v1.3.0
+======
+
+Release Summary
+---------------
+
+Regular feature and bugfix release.
+
+Minor Changes
+-------------
+
+- docker_container - add ``storage_opts`` option to specify storage options (https://github.com/ansible-collections/community.docker/issues/91, https://github.com/ansible-collections/community.docker/pull/93).
+- docker_image - allows to specify platform to pull for ``source=pull`` with new option ``pull_platform`` (https://github.com/ansible-collections/community.docker/issues/79, https://github.com/ansible-collections/community.docker/pull/89).
+- docker_image - properly support image IDs (hashes) for loading and tagging images (https://github.com/ansible-collections/community.docker/issues/86, https://github.com/ansible-collections/community.docker/pull/87).
+- docker_swarm_service - adding support for maximum number of tasks per node (``replicas_max_per_node``) when running swarm service in replicated mode. Introduced in API 1.40 (https://github.com/ansible-collections/community.docker/issues/7, https://github.com/ansible-collections/community.docker/pull/92).
+
+Bugfixes
+--------
+
+- docker_container - fix healthcheck disabling idempotency issue with strict comparison (https://github.com/ansible-collections/community.docker/issues/85).
+- docker_image - prevent module failure when removing image that is removed between inspection and removal (https://github.com/ansible-collections/community.docker/pull/87).
+- docker_image - prevent module failure when removing non-existent image by ID (https://github.com/ansible-collections/community.docker/pull/87).
+- docker_image_info - prevent module failure when image vanishes between listing and inspection (https://github.com/ansible-collections/community.docker/pull/87).
+- docker_image_info - prevent module failure when querying non-existent image by ID (https://github.com/ansible-collections/community.docker/pull/87).
+
+New Modules
+-----------
+
+- docker_image_load - Load docker image(s) from archives
+- docker_plugin - Manage Docker plugins
+
+v1.2.2
+======
+
+Release Summary
+---------------
+
+Security bugfix release to address CVE-2021-20191.
+
+Security Fixes
+--------------
+
+- docker_swarm - enabled ``no_log`` for the option ``signing_ca_key`` to prevent accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.docker/pull/80).
+
+v1.2.1
+======
+
+Release Summary
+---------------
+
+Bugfix release.
+
+Bugfixes
+--------
+
+- docker connection plugin - fix Docker version parsing, as some docker versions have a leading ``v`` in the output of the command ``docker version --format "{{.Server.Version}}"`` (https://github.com/ansible-collections/community.docker/pull/76).
+
+v1.2.0
+======
+
+Release Summary
+---------------
+
+Feature release with one new feature and two bugfixes.
+
+Minor Changes
+-------------
+
+- docker_container - added ``default_host_ip`` option which allows to explicitly set the default IP string for published ports without explicitly specified IPs. When using IPv6 binds with Docker 20.10.2 or newer, this needs to be set to an empty string (``""``) (https://github.com/ansible-collections/community.docker/issues/70, https://github.com/ansible-collections/community.docker/pull/71).
+
+Bugfixes
+--------
+
+- docker_container - allow IPv6 zones (RFC 4007) in bind IPs (https://github.com/ansible-collections/community.docker/pull/66).
+- docker_image - fix crash on loading images with versions of Docker SDK for Python before 2.5.0 (https://github.com/ansible-collections/community.docker/issues/72, https://github.com/ansible-collections/community.docker/pull/73).
+
+v1.1.0
+======
+
+Release Summary
+---------------
+
+Feature release with three new plugins and modules.
+
+Minor Changes
+-------------
+
+- docker_container - support specifying ``cgroup_parent`` (https://github.com/ansible-collections/community.docker/issues/6, https://github.com/ansible-collections/community.docker/pull/59).
+- docker_container - when a container is started with ``detached=false``, ``status`` is now also returned when it is 0 (https://github.com/ansible-collections/community.docker/issues/26, https://github.com/ansible-collections/community.docker/pull/58).
+- docker_image - support ``platform`` when building images (https://github.com/ansible-collections/community.docker/issues/22, https://github.com/ansible-collections/community.docker/pull/54).
+
+Deprecated Features
+-------------------
+
+- docker_container - currently ``published_ports`` can contain port mappings next to the special value ``all``, in which case the port mappings are ignored. This behavior is deprecated for community.docker 2.0.0, at which point it will either be forbidden, or this behavior will be properly implemented similar to how the Docker CLI tool handles this (https://github.com/ansible-collections/community.docker/issues/8, https://github.com/ansible-collections/community.docker/pull/60).
+
+Bugfixes
+--------
+
+- docker_image - if ``push=true`` is used with ``repository``, and the image does not need to be tagged, still push. This can happen if ``repository`` and ``name`` are equal (https://github.com/ansible-collections/community.docker/issues/52, https://github.com/ansible-collections/community.docker/pull/53).
+- docker_image - report error when loading a broken archive that contains no image (https://github.com/ansible-collections/community.docker/issues/46, https://github.com/ansible-collections/community.docker/pull/55).
+- docker_image - report error when the loaded archive does not contain the specified image (https://github.com/ansible-collections/community.docker/issues/41, https://github.com/ansible-collections/community.docker/pull/55).
+
+New Plugins
+-----------
+
+Connection
+~~~~~~~~~~
+
+- docker_api - Run tasks in docker containers
+
+Inventory
+~~~~~~~~~
+
+- docker_containers - Ansible dynamic inventory plugin for Docker containers.
+
+New Modules
+-----------
+
+- current_container_facts - Return facts about whether the module runs in a Docker container
+
+v1.0.1
+======
+
+Release Summary
+---------------
+
+Maintenance release with a bugfix for ``docker_container``.
+
+Bugfixes
+--------
+
+- docker_container - the validation for ``capabilities`` in ``device_requests`` was incorrect (https://github.com/ansible-collections/community.docker/issues/42, https://github.com/ansible-collections/community.docker/pull/43).
+
+v1.0.0
+======
+
+Release Summary
+---------------
+
+This is the first production (non-prerelease) release of ``community.docker``.
+
+
+Minor Changes
+-------------
+
+- Add collection-side support of the ``docker`` action group / module defaults group (https://github.com/ansible-collections/community.docker/pull/17).
+- docker_image - return docker build output (https://github.com/ansible-collections/community.general/pull/805).
+- docker_secret - add a warning when the secret does not have an ``ansible_key`` label but the ``force`` parameter is not set (https://github.com/ansible-collections/community.docker/issues/30, https://github.com/ansible-collections/community.docker/pull/31).
+
+v0.1.0
+======
+
+Release Summary
+---------------
+
+The ``community.docker`` continues the work on the Ansible docker modules and plugins from their state in ``community.general`` 1.2.0. The changes listed here are thus relative to the modules and plugins ``community.general.docker*``.
+
+All deprecation removals planned for ``community.general`` 2.0.0 have been applied. All deprecation removals scheduled for ``community.general`` 3.0.0 have been re-scheduled for ``community.docker`` 2.0.0.
+
+
+Minor Changes
+-------------
+
+- docker_container - now supports the ``device_requests`` option, which allows to request additional resources such as GPUs (https://github.com/ansible/ansible/issues/65748, https://github.com/ansible-collections/community.general/pull/1119).
+
+Removed Features (previously deprecated)
+----------------------------------------
+
+- docker_container - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_container - the default of ``networks_cli_compatible`` changed to ``true`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_container - the unused option ``trust_image_content`` has been removed (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - ``state=build`` has been removed. Use ``present`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - the ``container_limits``, ``dockerfile``, ``http_timeout``, ``nocache``, ``rm``, ``path``, ``buildargs``, ``pull`` have been removed. Use the corresponding suboptions of ``build`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - the ``force`` option has been removed. Use the more specific ``force_*`` options instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - the ``source`` option is now mandatory (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - the ``use_tls`` option has been removed. Use ``tls`` and ``validate_certs`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image - the default of the ``build.pull`` option changed to ``false`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_image_facts - this alias is no longer available, use ``docker_image_info`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_network - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_network - the ``ipam_options`` option has been removed. Use ``ipam_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_service - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm - ``state=inspect`` has been removed. Use ``docker_swarm_info`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``constraints`` option has been removed. Use ``placement.constraints`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``limit_cpu`` and ``limit_memory`` options have been removed. Use the corresponding suboptions in ``limits`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``log_driver`` and ``log_driver_options`` options have been removed. Use the corresponding suboptions in ``logging`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``reserve_cpu`` and ``reserve_memory`` options have been removed. Use the corresponding suboptions in ``reservations`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``restart_policy``, ``restart_policy_attempts``, ``restart_policy_delay`` and ``restart_policy_window`` options have been removed. Use the corresponding suboptions in ``restart_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_swarm_service - the ``update_delay``, ``update_parallelism``, ``update_failure_action``, ``update_monitor``, ``update_max_failure_ratio`` and ``update_order`` options have been removed. Use the corresponding suboptions in ``update_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_volume - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+- docker_volume - the ``force`` option has been removed. Use ``recreate`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+
+Bugfixes
+--------
+
+- docker_login - fix internal config file storage to handle credentials for more than one registry (https://github.com/ansible-collections/community.general/issues/1117).
diff --git a/ansible_collections/community/docker/CHANGELOG.rst.license b/ansible_collections/community/docker/CHANGELOG.rst.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/CHANGELOG.rst.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/COPYING b/ansible_collections/community/docker/COPYING
new file mode 100644
index 00000000..f288702d
--- /dev/null
+++ b/ansible_collections/community/docker/COPYING
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/community/docker/FILES.json b/ansible_collections/community/docker/FILES.json
new file mode 100644
index 00000000..d67c91a0
--- /dev/null
+++ b/ansible_collections/community/docker/FILES.json
@@ -0,0 +1,5206 @@
+{
+ "files": [
+ {
+ "name": ".",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/aggregate-coverage.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "db8ad813c81f9e23c821e10d4c6d91ccb4f0540c0eb3a21efe8fe1242346fa84",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/combine-coverage.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49d3567c21d253290b5c075e250b1460ea46c3f33b7d25a8994447824aa19279",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/process-results.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "360f2205a9559f75b003df8e62099d6df1cee1953bc15b6a4a561d18cab2018a",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/publish-codecov.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31c38e5c2f6021b3762c52c5c55f9fb565586b69e998b36519d3072608564546",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/report-coverage.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6450729de6c27bb170d308857bbf1c685fcd77cb0ea430cc0c21532d334a6893",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/run-tests.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f0fcf44f253e20ca994b1ef3cf69dea830445af3116b8cdd9d7423e8ef240873",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/scripts/time-command.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "486dd0a00417773b1a8901b4d411cb82f7e3ffea636ed69163743c4b43bf683a",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/coverage.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2cfdbf962bd04a21836ab1cd333dbf5dde408c50d3b1ec12231ed55172fd800c",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/matrix.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d41651c12445067921426ce3de9661cc9dbab2e5ad3b2650edbb444371cac14",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/templates/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e899c99995b9bda705f85203a5b851b833cc407a1d619c8da0aaaf92b1835eba",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1e68364750709fae75220652b5837198a1deff224fa37d4147eec37a7bcddd70",
+ "format": 1
+ },
+ {
+ "name": ".azure-pipelines/azure-pipelines.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb824ef65e02f7d719a8db002699d5727b2df9260dd4f68d62193a91be12a76e",
+ "format": 1
+ },
+ {
+ "name": ".github",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/ansible-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "62fb84f2eb465b21006e99a13957702c14cb63fc530dcd88ac81b438681da7c9",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/docs-pr.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "754a357ab5c3a5fd1dd782ade11220decb76a24bfb4452a9a15d9a7fec1f863e",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/docs-push.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8e50964ae57efe54009281fbd8f6f6f3bfa493821a5da63c08635afed76fc8f2",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/ee.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3c7dcf434e73aaeb8526ee30db2e97b0f80676b1f18d8780224a811f97fb213",
+ "format": 1
+ },
+ {
+ "name": ".github/workflows/reuse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e8a5666a792db2dbbb984d861032728af7523bd05eb64f72d5fe24d7888c4a85",
+ "format": 1
+ },
+ {
+ "name": ".github/dependabot.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f314f2fce46d346ef9559fdeae3a55ce6d799bab45a255092446c31e17e6874d",
+ "format": 1
+ },
+ {
+ "name": ".github/patchback.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a23e48e2562604318540e6ddcac75213ad2c367258d76fc75914e9b939d380e",
+ "format": 1
+ },
+ {
+ "name": ".reuse",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": ".reuse/dep5",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d8cb20d72d7e81aaf2e7f0ddce7eacdb3d47a890541717d7fae08a6cab7ebed",
+ "format": 1
+ },
+ {
+ "name": "LICENSES",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "LICENSES/GPL-3.0-or-later.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "format": 1
+ },
+ {
+ "name": "LICENSES/Apache-2.0.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f2f0b07fa5e492c11d27aa0d2f3f1a0e64b9d17f32d8aa489ae2af9609af33b2",
+ "format": 1
+ },
+ {
+ "name": "changelogs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "changelogs/fragments/.keep",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b9c41a4443308c6d7b2bb49d717e37a676a27471fb596247a35962cdc649e374",
+ "format": 1
+ },
+ {
+ "name": "changelogs/changelog.yaml.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "changelogs/config.yaml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e608bfe9368cd9a95e04b99ec78d74bd7c8e125cc715c059a1957bb32079c102",
+ "format": 1
+ },
+ {
+ "name": "docs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/rst/scenario_guide.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "11df82606883ecfa82952f141ed9c30ac31e07746dcd50edc53ca8dd3a360a2e",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/extra-docs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "abe1ed63f09f1504b11896c80b70b688bb42cd92074a56a893bcdccaeaa9569b",
+ "format": 1
+ },
+ {
+ "name": "docs/docsite/links.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4120f3e745957bc2b3d4b0c273eede4e4a531d65f2fd2c40b5cc094c5cc7c627",
+ "format": 1
+ },
+ {
+ "name": "meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "meta/ee-bindep.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "02de562c6658595ae581a5896e447a309c0b6a648ced86815ee3b271c52f0850",
+ "format": 1
+ },
+ {
+ "name": "meta/ee-requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05d1b11fabc1836d30b2577353404ee8cedab413bf02874413538f77c882ce06",
+ "format": 1
+ },
+ {
+ "name": "meta/execution-environment.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f42b4946cc8e628812ba87c2d36dbd5ed7af316d5c68bba8fd7d72808254f967",
+ "format": 1
+ },
+ {
+ "name": "meta/runtime.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "036384152d6b46d0edefb1a12d1dfef3c85205511e5db3319d49d17fa342f97f",
+ "format": 1
+ },
+ {
+ "name": "plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/action",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/action/docker_container_copy_into.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1c50124cc05fb7aece01e325768ce1683916f5c620545c6ff094756a3f58e29e",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/docker.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b33a9616e9189aed4014f1869666bec5b788dba44385ecf3cbeed4e30f4ffe8",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/docker_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "aee7c8be4996a947de9457fbad1df113b7626f2ccbc5c629a060057c2ec14b43",
+ "format": 1
+ },
+ {
+ "name": "plugins/connection/nsenter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ca5d2af831451b5d3d38fcf237314b0d86f85bea38d589f91b249e7d5f34031",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/attributes.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3cb11d4fbb64f22b4c70fb7baf8f8777a21d99887383234e1af7945f45ca95a6",
+ "format": 1
+ },
+ {
+ "name": "plugins/doc_fragments/docker.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f164958068db65e0d813835b35ea9b057c1f45cabaaa5e5e34d7b6b22b2e3ade",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/docker_containers.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5e48e19c7f841b06e67952cd5ae4897e9da72909bcf8e739453290a58ffac6e",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/docker_machine.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63788182b475bbb689919fa08b9753c3cba026937a6d7be2ee5238727d583e53",
+ "format": 1
+ },
+ {
+ "name": "plugins/inventory/docker_swarm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d17d6b8df09dcf99c45f2b077b6077b2cd0e319d99b7be0a46859e9855b4465",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/api",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/api/client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "957bfbb66512485e7b324be0c00e4c96a0db8a99f0d175732ee64ebc6fce29c6",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/api/daemon.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3ff41f499911eba707f23fe05062adf3d5c7464138111c37d1afc30f913a2f8",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/credentials",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/credentials/constants.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e27e1fa309f05d152f1913e4bfa3148b5616f01026ec6bcc99c150d2bf287c22",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/credentials/errors.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9bbefc587e81919ee2742084789f5ec77dfbc7b71edce3d4b82e3ef79573adc4",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/credentials/store.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd210d9c0262e0d3b053da2a65164df75f55646665ba7c97c28dd430f42f4159",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/credentials/utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17d2fa82ff80339b73650e34b2905b77d2d8bfce11a9d45b40f874d5103fe59f",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/transport",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/transport/basehttpadapter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73da87364c86548cdf16f52608349443f02f9926b52e736649a417f1238376ea",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/transport/npipeconn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "086da24a2e7ec6f6ae33cccc308cc8e4103d417950d3ee557454b58e21e54e5c",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/transport/npipesocket.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "058e69397607921c0c87b74ae381948b19ac2838c13066145712cf60dc52a2d9",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/transport/sshconn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8374d65854ab0bcc9df84abd03d4ad0fa434932818cbf5e4881d8439b5779f82",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/transport/ssladapter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "44b20375c0f42347bd84f6e855d4a4042b5cf6e83f0f49199ea04cb8c6578130",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/transport/unixconn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5010ffb01c3ae3ca789c31aba00eefb1d4874f6992abd37aa2855b6dccbb3162",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/types",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/types/daemon.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f2f62cf8368b5763fdd37928be5fc1496c76120c98253a6b7abc25d4944f036",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/utils/build.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2384f3ba9a75a65db4cab0a3bff25262cb37deaa182b47816715e73f483ca812",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/utils/config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf89f579d69305de6b158d5a6cffa8a151537826b7b29a46c2f480cae9acd0c4",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/utils/decorators.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8908e147a40b06230a5e4792c4138f2ec56cc2aba5df18ae80e5746199b5d474",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/utils/fnmatch.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a52bdaf477671ebc49186f8e7a2a0c6a3d014d544fc0ca1117896b3bd3c5333",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/utils/json_stream.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9db37ffdbff88e72fdc8b903c107a3a3b2369a3102450152e1fe1feafdc40c28",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/utils/ports.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ce0e92cc6663e0e58f0b34586ecc4c1436d2bf8d86b7d569a0a6cb65fc12000a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/utils/proxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4f6df3b56b77ef65add7f795d445ddf88c74e12ab93f8880f2692c70915820ea",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/utils/socket.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4775d6926a42581791cca5ba175c99fbdd8e204a0091bb74e27b5971b7d158ec",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/utils/utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f11f1f6efa555721dcad3244256e97daaddf282e81d9a9a16683aa9d11ff1973",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/_import_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a8fc654b899aad3b61a66a2a6e74be9c7ed7433fc6c08f192a12932b5e7c8ad",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/auth.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "050402e1503e0efc852051bf403a44d149d0d4770accc9f2888ecff8a5f3a5b4",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/constants.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2876120cf27b5452728357de13d552aa5987dcb8a2a0942fdd8594ba6c17ea23",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/errors.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36dcebce54b43306c9bd14c64786c14c9e91b60cdc60747f2d531861eb43d90a",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_api/tls.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "23a55b15907f6d866d17f366af10e6efc0e618b207a53d3dd59ae2698cea384e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/module_container",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/module_container/base.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee59b15754a61eb80299496b9ff66b8724faa2cc62580a627ce0c3ea2a147a8e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/module_container/docker_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "158af3f302bad3889571eb688dd61d2c0c14f97806fdf73e6f02c445766c25e1",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/module_container/module.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ec66e374d210e738d7aaea787d1bc42a547f7067acabeb6253f7fb0b8970dd18",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/_scramble.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "77123e4994f234b5a2f54065e80f623b307941919418c2c4092f3f3e9a10d0ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86b8ea61f7933baa3a681810ffbb49a28f45f4d71d09023b093093d96acde20e",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/common_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3eded77a2c9e9987e6690f1bfb17b8ebaf8eb5aba712ae2d9417cce1a116e301",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/copy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c774e3ecf789fa2240be6178a4aae6bc571ff4809a79497446712ce38876526c",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/image_archive.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7c6784aecdfc14661d55b3ddef4e0c19e6a44bb0fecdbc514834c571716e8711",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/socket_handler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0913f168491236b76751b68abab54cc332d09a580e9fc9e6ae52e92916c850fc",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/socket_helper.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2bdd8d9601b7e04e587904c9474c147d5cbade2e7175ad2e1ff31a2f77abec59",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/swarm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3045e4e7bef80c90ac21b85247d489af34819488809f205568f099d08e3829bf",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/util.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "36b3fed68a9a09ffe13fd31c72531f2428db92417f8b39d3a98b8914485c3a92",
+ "format": 1
+ },
+ {
+ "name": "plugins/module_utils/version.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9b898a25d61e52c8b31972c9b66f8990a264f17671b6d544e107b77c3dc42ed",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/current_container_facts.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f8859f459d86a9cea8acc478f6cca9d9afbdb8fdb3de67dd7c26b519e8266e5b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_compose.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d644833d8c906b405b653999f5589b2fdd4b0c8990ac3f75cfd1474bd3aaf654",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d332e43cf754184bcc99367c2b256408d60baeaad3b1f45eceef634748cd9eb5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_container.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e47b110ea04f3db21c4c84ac913d587191bf32c72d70f11819bca7a721135458",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_container_copy_into.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dfeac79e2e0e22a99815fce943475665427a528dffce4c9804e401eb5b735053",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_container_exec.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "95e062fb3d0b1a4297146d98b8fbf10ba5275ff91f68654e3f06f6c9565c93fd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_container_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cf3a704310a9a0ac7d02af0fc9b07cfb9d3ce65cdab9ba597161f25bcb493c33",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_host_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6bfafc6e0fd9ae42abf9bae05c65200b8e94a9beca4108c6785c1a3843546c2a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_image.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6e5fe1c3bdef812f6e58d89084958948d2a77000b2316090e4309035e95635b1",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_image_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cee8e231c2ddc6330958d95c830a523cfb5663284b2fe7f9a8b9f43948037df5",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_image_load.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc180605dc910fedd7c3e9b75ce91c2de46a3eb6df014f7666345c074f621f8f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_login.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1f09ecf54b177ba5830773fdfb7092cf54ab207f33522a603201a4fce94a42b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "03cb04a8a13c033237eddc478b5944e32a571ae67cde6004b923e08083bdf1ca",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_network_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3a67bff7fa7f1a2f0d55b5c34187699b0794e67123bfdb86bc3da50a9296e413",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_node.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "98e369f1bcf1caded3d21f38724c6187a7705a0bae647a57edade757e2679ec8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_node_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5875c7b1b2a5f080277de528ffb22f70540ea19d3a25583b25cc6b1046573517",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_plugin.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9ea2030d275bf6ac19f7bf0fe7fc1b0dcff4d7cb0b15cea124921e99732d1abb",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_prune.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9315eb36ddab089ddcceba06cf8c70faf3a4d0d77ecce0d9a133716ee4bc1dc8",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_secret.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe578e93cd122a530a0b1880aaaeb6286ea06c9ce7d752345094a3ea0f63d00f",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_stack.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b0ee4740452db238cc6799510b3fccd6e06fb933b7053117bf62dccedc28f66",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_stack_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5c8ad7b526a9903d4c334225e81c1b794f72927e7ca1bd318400f6f4d1a35a23",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_stack_task_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34cdb57ca28f881e6069bb9f84f452c5f3b340f6f19afb09e3b7b8ebb918379a",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_swarm.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd309b6668d3bcd4561b3462e90780b99b6d76b3c7612f39f1ce45fbe98bb6b0",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_swarm_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a028695e7b97b4cd117287ffd3542773f098fa1c53e889c1801111a192869a29",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_swarm_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "05ecedb2d7f2f47dd49be051fa8da4723e63b06624282724bf9bdd5ca603e23c",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_swarm_service_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a67a4a18d2482ce4a624d93351c6d013780351e531eeed4a8507e0d03b871a4b",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_volume.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1d59a481f217bd40f4b9188b540490285a7bdedd0a6e830b5942f661006630dd",
+ "format": 1
+ },
+ {
+ "name": "plugins/modules/docker_volume_info.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e6727d7a596134f2f4f7116e42081ad78bf41d7c72df2e4fc87ea772f138c769",
+ "format": 1
+ },
+ {
+ "name": "plugins/plugin_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "plugins/plugin_utils/common.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf79729abc4cd3044ffa6e70e76af83956d6c686d72388b901a2b32387a4787d",
+ "format": 1
+ },
+ {
+ "name": "plugins/plugin_utils/common_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c44fd85b990483fb4cbf59c881508bbd6dfde2b16cd0890bb157dda52dfc1b97",
+ "format": 1
+ },
+ {
+ "name": "plugins/plugin_utils/socket_handler.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35cd4e8636a9d7bc209996b7a384c183f14f205485939744d2164d86a70c8e9b",
+ "format": 1
+ },
+ {
+ "name": "tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/ee",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/ee/roles",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/ee/roles/current_container_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/ee/roles/current_container_facts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/ee/roles/current_container_facts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a69f4b9bd07c91387dc4722ba7a449c5ea0cb5fd7293f3676bf04bb6b65c929",
+ "format": 1
+ },
+ {
+ "name": "tests/ee/roles/docker_plain",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/ee/roles/docker_plain/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/ee/roles/docker_plain/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "750d3eb7999b8dcbf618797e3414b70c523948434f646c424845ce36feb13d41",
+ "format": 1
+ },
+ {
+ "name": "tests/ee/roles/docker_stack",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/ee/roles/docker_stack/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/ee/roles/docker_stack/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "33196ce33a946992f9ab32d09fa7ef2e2e6f2b82d57b2b7dfdde8fa5788d3a4f",
+ "format": 1
+ },
+ {
+ "name": "tests/ee/all.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5e3219935d5ef7989afdae670ae1d84a36bcbd7f1e5dfcb15f6c5535ca52b0b1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c28953826fdc5ca7bcb519d60bc7d1286b038dae761b63e43f6f0a163cc127b1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection/test.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1795c9109c37a127bfb605b3275b1673f59476c3c832979475b3c4f49bb008f1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection/test_connection.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b592195fed688108e3ab4748d9140bab2e28d150e0a4bb5a128f3c11caa7b02",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker/runme-connection.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b92bc5256864e79bca068cded19c68d00d1014e45dc101ab7225e8fbaab06af",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e22e771103606ae355c341ef62a7e92faf3386e792b5bbb5c3979bb9fea44f1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5b05e80ee60e868b634ac29fbe44a9453f109867892361b4ecf61e7016dcb75f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90fca01e7a2276ac2616ca60026535d3996b4de111479473d7471a9ceaf22be1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker/shutdown.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c85e246c0bb1a679bb330c5b39bd18df208bddaaca5ceea4e970f8a6875c1a21",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker_api",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker_api/runme-connection.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b92bc5256864e79bca068cded19c68d00d1014e45dc101ab7225e8fbaab06af",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker_api/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker_api/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker_api/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e22e771103606ae355c341ef62a7e92faf3386e792b5bbb5c3979bb9fea44f1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker_api/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7e79404445ba135ee4250d7e0789d95b19f94cd316702b30f6d3082a02eb3d30",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker_api/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90fca01e7a2276ac2616ca60026535d3996b4de111479473d7471a9ceaf22be1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_docker_api/shutdown.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c85e246c0bb1a679bb330c5b39bd18df208bddaaca5ceea4e970f8a6875c1a21",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_nsenter",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_nsenter/runme-connection.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b92bc5256864e79bca068cded19c68d00d1014e45dc101ab7225e8fbaab06af",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_nsenter/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_nsenter/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_nsenter/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55b316a767179f45e493b48d1ec6bd0c94dbbecb26040692731b5fd8ff201b8a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_nsenter/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7f6a528c5f20c98919cf8cb93d1487f05844c9f5eab75621cc7a294d67177a08",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_nsenter/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90fca01e7a2276ac2616ca60026535d3996b4de111479473d7471a9ceaf22be1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_nsenter/shutdown.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c85e246c0bb1a679bb330c5b39bd18df208bddaaca5ceea4e970f8a6875c1a21",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_posix",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_posix/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b04c82c870ffc2902a90819cfd91df99dd6c9584734a2a2e646b1f804238262",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/connection_posix/test.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b92bc5256864e79bca068cded19c68d00d1014e45dc101ab7225e8fbaab06af",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/current_container_facts",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/current_container_facts/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/current_container_facts/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54d2101dacff52b47d7137e6ea3a422760d3c9e8563c18bc8fdcb986638e5233",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/current_container_facts/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "58354391cc6595ac28ad510056fd4be2a0fcd0c6e90a685520f1198fee05461a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_compose",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_compose/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_compose/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47bfd6330a8616b7324f5a956839b953d844ed07b327c78a67ac86d1b65c6102",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_compose/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_compose/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_compose/tasks/tests/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3e694385e80262d4782e23b1a069ffd3361509b96057796cb6669d3d14b02ce1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_compose/tasks/tests/start-stop.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "088c5a24b830c92a6352529cc46f3e14f46d85e611960ae76c5a5934627a85ee",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_compose/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1527d71c7fcb97bba10c5abe90cf36795b45583d33280d58a8b170a33dc27d36",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_compose/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6c09c1ac08f548c321efdee418d3bf6a621f65725d3d6de095119600a3e876",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_compose/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f3d9ae2d6857600b4881f03e3b5cb30b9492a75bce28ba914c621af34f3b7fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "218aed170f9212c6c00e6485fb5c3aff3986ae487491b138c6dd2ee1b33766c4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/tasks/test_docker_config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "08dd9ab43d652b6997690c262e39c873647d8427a683951ad540583b8a0d1f61",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_config/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "133d67cf2b2857f5a906a6e054498df8d5ddc556a3671cad8216d06d0dbd8e09",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/files/env-file",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d22d5c5709850e8af4d069878fa79d7d8e54b88518a1655c50826e4b50ad045",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/filter_plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60532ad91ec9ec288d9a6cc60e92e94828c681f23f4797af0fb687fad6adfe55",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/comparisons.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f622ba77bed23fe0bef21c621696db94325b0e98f067dc64f12abbcb7daa6a36",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/compatibility.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7a27f5ab737d3a39ff24c4521208432ecd553d77a265b07fb8499b1066a692d7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/image-ids.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b0440399c26e6c9b3cf2e29da8ef4e3a97a2c93ee675cd5e7a55c4f8e41785e8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2102d3f8e2e8cb629265e182f81378e8dc1e2dc0ff87832eb09c89b1ccbe8c32",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/network.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0e1a9237a5d7091040c045e149d7584015985938b4b0ee7a5b7f01fe81ef46a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5c152736a4e2c51367d3553cccc4b44320ac52eab374f4837b3eaaf29e6cc72",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/ports.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "138b2d31ec081219e3e436ad3b5e4dde3ffc5bc30aa4088c0e97c56f807cdb47",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3007b6ac1ae353ab8240d0c98382d05e16fe7867279b6f702ebcc03d9bf448de",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/start-stop.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7639592140dc4e886a39ef96197d99ef88d726b4be48a6a7ed063f435214bf9f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/tests/update.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "92297e5b4bff33636121f65b8b8ffb61212c5fd233cb7c50095e7668ed6613c7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f437ccc68a828f632b778b60368f62f93758c09bd3339df64ae427cd8e9ef9e7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6c09c1ac08f548c321efdee418d3bf6a621f65725d3d6de095119600a3e876",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01b45d7b6d896d5b801037e95e7e3225acf01da3732237ceb8ebf75e49adf60d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_copy_into",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_copy_into/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_copy_into/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f3d9ae2d6857600b4881f03e3b5cb30b9492a75bce28ba914c621af34f3b7fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_copy_into/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_copy_into/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_copy_into/tasks/tests/content.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba9edfdb3876753bc2fd194a7076699d8da3b528667a6a65c39ba37c9b28a6c5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_copy_into/tasks/tests/file.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68ce2ed7af0be53ee05ff324533e82589622f263057bafb4f68a17f7dad723b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_copy_into/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "524ff9fae84d128947e0b36eaf1ba800f55dcc1e4cec812ae4f4a86463d725e2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_copy_into/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6c09c1ac08f548c321efdee418d3bf6a621f65725d3d6de095119600a3e876",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_copy_into/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_exec",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_exec/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_exec/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_exec/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_exec/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13a3a3c36fa72bd27f6b81556239c5ac3380c8da9e427e087a649d8361e2719f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_exec/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dd87ea6325a300ea4015d3090672aac951752d3a2b024e385f726ce44202c013",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_container_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "01b45d7b6d896d5b801037e95e7e3225acf01da3732237ceb8ebf75e49adf60d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b0baf4500b169ff8084559a7990f0462c4ca112519fa7410f481cb35c6f3427",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/tasks/test_host_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8876138d9d717c706d3131a7e17b762e25ad4079de0138e6062e2f603bed57f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_host_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7dbaea666eae950a1143d33cb471f602233866f4e39e4ece479ce828658a50ab",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "142e5c02da389057b35f7439df89e0c3047f8ac3f0bad93513a770e0a04ee5cd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/tests/docker_image.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b00a60da257604ace692b6a044f374071d828d1ff005ffc786c23f8fae385f35",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/tests/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a2f7b377f8b6c3ca7e3565a40ae4d81244f26ac5bbfdcc5dec20ed17fae268fd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1fe5f030f6ca6066ca6c9d8e41d697cc62bb457a55e76ef4dedff2e7fe896f1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6c09c1ac08f548c321efdee418d3bf6a621f65725d3d6de095119600a3e876",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cdc47967e0d64ebadcb0878d571976a21a5942ac286e88fdf00694e3e733f239",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/templates/ArgsDockerfile",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c3699620b7b078291c3f1d1c97414c9e961b51ce3705fdccb67545e87fe7e75e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/templates/Dockerfile",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d300419d4d1e626ddbd093c53831948c6ebb6d48a973ba4a0d9fa34cdf7462e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/templates/EtcHostsDockerfile",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f9d474c11ff7766ee58b74e50ecd80fba99b9ca1bab0b661c8f39c91dee23aed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/templates/MyDockerfile",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7d9039d8165a7c7170c5f1466438e9c27d6380abb329609d7601d34325709858",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/templates/StagedDockerfile",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d5882640f5183ed9b01fe7f06e144cd375bd402a1f2014392347c5e0bfce2222",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "426eca8e288e79157781391f44256414ede3708b23c39f29c92dd7db3091cf42",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_load",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_load/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_load/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f3d9ae2d6857600b4881f03e3b5cb30b9492a75bce28ba914c621af34f3b7fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_load/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_load/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_load/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86da5d9bdf5be5824548d45f489180bf3783a4a3e418f6632343ffa512860e04",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_load/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1fe5f030f6ca6066ca6c9d8e41d697cc62bb457a55e76ef4dedff2e7fe896f1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_load/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6c09c1ac08f548c321efdee418d3bf6a621f65725d3d6de095119600a3e876",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_load/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "095b8c21beee6803c2a06eae5119a074eb019e54ebf2d8f0f348047cdc4e8997",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_image_load/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c0879f5877b4d7a56bfd1fc54868e01542fd2c3a3c935fbaf51a6f8b1d78b98c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks/tests/docker_login.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32bd7505493ca7016e201f730fda1d938ef0c0d1c40221405105c03785507e1b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks/tests/multiple-servers.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "899043f20783bb683b488c8b225367a8c0a67ac1c3628591bddc8eff72a7f374",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c1fe5f030f6ca6066ca6c9d8e41d697cc62bb457a55e76ef4dedff2e7fe896f1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6c09c1ac08f548c321efdee418d3bf6a621f65725d3d6de095119600a3e876",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/tasks/test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "17cb11d752619a8dec4d8cb853f761e8b57fdb5e5ce5a5da262f9084609727a2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_login/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55534ba4791b0036bb9c117e9a7f6ae0394f80d5d4308a3dcf65f4564d7762e3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests/ipam.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d125372c2a14262ec4da6484cff93e4f96936c00c32fc42502e587b8bf0b4de2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49435596b5906c270463be485f2cc09d16310ca49a66a48e3b1031f97db63253",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests/overlay.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9b9ced6725b33b3fcae41e7341eb88bacedfdfaf7727abfe7304f5ff4fa9b867",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/tests/substring.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f7dc51530e72162b7a3c6806be4be3963aef34251036c222a06b1eeffc916985",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "783052d7b0b20a53eb53022d1a11291f58a2d6a1d5e1fe4cdaba53ef8f48ae91",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6c09c1ac08f548c321efdee418d3bf6a621f65725d3d6de095119600a3e876",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6aae8f07ab0cd76c415bc74687e29a6055c1fc27ae519bc5079d003d9309e6b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_network_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7227db085c3248209f3c6e2ca999811ed192a2ab188e378e18981d45e5be024f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/tasks/test_node.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bda1878d83565fb8898708430195d199316191192995faf33f56328291d02458",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a76f4a620dfae81104aa5ee3c8aedd5b41550acf46aa7485ccc0d0e6a8bbc5a5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b07afb61b4d0234f26b27e0154db8933eb315f472e3dd4bfc053d32ba63bcc74",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/tasks/test_node_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6a27a92f404cbb84de0e074ad065e4050da9116196d76cc4e79d1a310f37c6d8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_node_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ca2add1203cb87d27f8a9d1b37ebe4b8ab2837000380b695c048f1844eaa33f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_plugin",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_plugin/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_plugin/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_plugin/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_plugin/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_plugin/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "32dc8a4e25d13084a7e9c03f87b2b5f910ed8ce0aa90f78bded97d4afd1b17ad",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_plugin/tasks/tests/basic_with_alias.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a290c1014a5a347ce52f13a2f64ad3df22375badffd5a7d222a1399ff5ad8d1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_plugin/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52bb0aa03f299c3dc12da10e484e0461c14fdff09b45613a80e537aaea6d344b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_plugin/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6c09c1ac08f548c321efdee418d3bf6a621f65725d3d6de095119600a3e876",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_plugin/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "277ac80b8d8597c2654bac67cd19434f0c478efe5f97cd341a5c90e1214c0463",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_prune/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f3d9ae2d6857600b4881f03e3b5cb30b9492a75bce28ba914c621af34f3b7fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "55b9bb68e043a78a28f8c23dfef0efca1ffd97c34f616f9a91bb9a94bf4287e8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/tasks/test_secrets.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d49058d648987547abe31655563a7663f1f5d71925417dcf249d795c45abdd44",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_secret/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "133d67cf2b2857f5a906a6e054498df8d5ddc556a3671cad8216d06d0dbd8e09",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f3d9ae2d6857600b4881f03e3b5cb30b9492a75bce28ba914c621af34f3b7fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ba502e7508aed562c8c1e2a29ae54c3684ef7175e8c75483fd4d728d5136b054",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/tasks/test_stack.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75abdee49bb9be0d447a2cd08a115fdfa0dcde8711ba10bc14d7ac42cc383cd5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/templates/stack_compose_base.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5558e4dad8831ef90d224785b65d7e8f2b1ad48f924cf80312b8005828c58e8c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1095458fd062b0208d910a56599ad4f748dc2997fad41168453c00499b224ae5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d7c3098ce206b9a9edf4aabdf1016615bafe0f6e9f549a85693bb6212187f3c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ca2add1203cb87d27f8a9d1b37ebe4b8ab2837000380b695c048f1844eaa33f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f3d9ae2d6857600b4881f03e3b5cb30b9492a75bce28ba914c621af34f3b7fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e7499cd99e0bbca866c83f9e97438811296d80caac7b25406ca6ddf8124c3c0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47f3a780523d0ade76b5f881890ced60757ab9a24988b4398691b0b849c47acc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5558e4dad8831ef90d224785b65d7e8f2b1ad48f924cf80312b8005828c58e8c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1095458fd062b0208d910a56599ad4f748dc2997fad41168453c00499b224ae5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d7c3098ce206b9a9edf4aabdf1016615bafe0f6e9f549a85693bb6212187f3c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ca2add1203cb87d27f8a9d1b37ebe4b8ab2837000380b695c048f1844eaa33f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0f3d9ae2d6857600b4881f03e3b5cb30b9492a75bce28ba914c621af34f3b7fa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "582b6f94d67b7c7d674fe8a5ad9d5fa35f130fd3ba5775281116112c752023e6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dcfd63fef6c062d68b21cbbf4ecb44656d4be44f167cbb86f758edc4f8842b86",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/templates",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5558e4dad8831ef90d224785b65d7e8f2b1ad48f924cf80312b8005828c58e8c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1095458fd062b0208d910a56599ad4f748dc2997fad41168453c00499b224ae5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d7c3098ce206b9a9edf4aabdf1016615bafe0f6e9f549a85693bb6212187f3c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_stack_task_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2ca2add1203cb87d27f8a9d1b37ebe4b8ab2837000380b695c048f1844eaa33f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e40a69bda050967e474e8787b51b7343a60d5c7ac88685d66b556d160b942ea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "387c884ec51f37bcc743d8fe78e9c13bbc53525d4a33f53ba005bdcb393c7b82",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e311704bfd0fb615c074cd1d88cc7d1f7aa0c044855ac2d20f9fbe7970b5cdd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/tests/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "74407ee96908822f1669a5533152b377a3ed04acddbbbb9a8ecb0870ee830fa6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd8a708bda8339d4feedf71a2cc75d1f8290b12ecb46c3a5d8e8cb31bebbbfe2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "52911a90fe5216a651eb3f21c03b8c2efdcd3a2e6674037acb3389bfd21b45c0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "243eaa04af0c56a5361d4ae5e09fc7d1ee61ac8df474638a0d00dfa5cbf08db9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2939bfeaa28470c221965664ddf11c7ce25f6a9fcadc5f55269d0b8f3b9f833a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "13a1222f2d7ac6d7e4b7e7211c87611b37311abbe530091b1f9e29aacefab6e3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90c49732479e7ca86986ab2c60e33e6f76b8498153ac5c53f868853d7d41f501",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c8c82d6dae109c4dbb806e41bea1373d7168c14b933637e328f613cefbee7617",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/files/env-file-1",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6d22d5c5709850e8af4d069878fa79d7d8e54b88518a1655c50826e4b50ad045",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/files/env-file-2",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4de8bfd3c7ac6214b41503ee6f850d0fc2d3b39da724c7b89948f776f01ae6bf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0e08bc6e9da871f3893df657e47555078a5e051446f74c7ae04d88d71cea3caa",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "64d83c648bb5a90d040d22209445a005cde9cd5421410e85a9b58ac8b8416730",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fb520c37c45a16afd89c1db80ea47f87a29f1f6709fbac1c0129ed5336f2d092",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "feb2f64cf09a5c3c3671ddb63424106f859e56ee6458903fe1a88314d7913b5e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ee6f4d57049dd41e941bf38e68aa000210a8f672424db0f8d691f437faa5922e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/options.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "87cce2cbb976833056411398d1291604084d6e50790ded3d1dc6f68a96fb6888",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "76afa0a98842465343c95aee8c81af356c4fb7fd6787c1d6220606f2c0cac36f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4752983e928afbb32ed4c6b0b4e56b063bfa9c7f8bb4946cdfe32c82b9173866",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "939859cca097d56b76018f72b0c332e5aad767b51c2b4dbac7289d480d2ec13d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b16436cb7ffde9b0c6360bacc9c24e177e747e701c38e9b548ea4bcfb9ffad7d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dca8535022b87072bd2c56dfa28fc5191123d54bf85b77b84a06d50bb4e24947",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88c0380d4311ed88be4d4a8bae09c883c23eb6dc6150bd8fc812bb6dda6142f4",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "66e91bb3f282f7b18ca6a769723a257694c0d06721505e1d52a4c7f71c5edc61",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6c09c1ac08f548c321efdee418d3bf6a621f65725d3d6de095119600a3e876",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe5d12fec25ef974db885fd92993fc20baf102c5db843c58dd4263fc80c46ccf",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "133d67cf2b2857f5a906a6e054498df8d5ddc556a3671cad8216d06d0dbd8e09",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "49f01091077b498b757787c7f3903018b0f226193f4396e56e596129f5b0e67b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "de9df1f83213750697c77b492330e8272ab72e2e4b6132a90a67025fe03a72fe",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_swarm_service_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "133d67cf2b2857f5a906a6e054498df8d5ddc556a3671cad8216d06d0dbd8e09",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/tasks/tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/tasks/tests/basic.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "475e8c5fd3509e7f3e96eebbd26aa97e19fe2975feacd6830894062429392dd9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6543ef6a25f219df5e563d0c1f01ef07f5b0a3dab90f402e9829c1bc74877a9a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/tasks/run-test.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "da6c09c1ac08f548c321efdee418d3bf6a621f65725d3d6de095119600a3e876",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3c191889c5d70676ec95292c46169b822c7bd262a8f52294820f482bea2d188d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/docker_volume_info/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests/files/nginx.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "381b6215898f79b477b52759f461ca424f6a9853edc0bc3d29790a2dfd8b8cd5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests/filter_plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests/filter_plugins/filter_attr.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1b0dd3b809d30ef9b8836388c0e36f336bb9e640cd633b41a90efdb1d7faad7b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2e40a69bda050967e474e8787b51b7343a60d5c7ac88685d66b556d160b942ea",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "880a774a7c576b4d4f18dc7308387997278fadb052687da474dfe4c060922cff",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14638ce9bffa141ed24360a6b3829bc932cc92d32534849dff7e47c8dbd4d337",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_connection_tests/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c22c090ea76cdd2a6ccd1689e016e014e4b1970a498e6db3123d5c95fb7a352b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_ssh_connection",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_ssh_connection/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_ssh_connection/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3f6945a1293b14cbefecf8301fcc7cabaa845fabc7e6e5d4df97a159a848bb2c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_ssh_connection/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_ssh_connection/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d252f557380bba5c18a7d7a395bedf317d1a3322d6ed0d28c24738edbc7b3c5f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/generic_ssh_connection/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd4b5ae853d3f070a2fd4b1ccdb6460d73fe1ffaa6b95e64bc49302b8bcf82ab",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers/playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers/playbooks/docker_cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "234f2134d2618268ded9e87f250344cb54d1ffac76077dec39231ef435e69888",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers/playbooks/docker_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "27487a6d48339d9cc60b3f212670c8d1e36c19f340cbf254ce24c183bbc59fe1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f9ec4b04a09105e25040fbc27f2dce25d25826d2c589cb2f50e9d9fe9cdb80c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2f8162017e23a5c9faeeadbfb6e5e0700f1e64f1d76bb5f4d21c44deb6593c4f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4e6239eaffde9b964ae0ea057e9ae6baa4f6cffef7c2268f5cf62c51119ae403",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers/inventory_1.docker.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f3a49b3d86cc4cf58ce41ca680ce530d5f1dbcdb4beec75cf4f9268c35fa158",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers/inventory_2.docker.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f6beeec0f8619270e63d4170509329cb1ee10be468ad063be53eff92f1da9340",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_containers/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9e9f61933604532e33a6df228c8165615864926686a4f278bccf7574725a471f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "06da4e8823ec83e13ab7e15e2448347a01ff3e86a26a7d5bae23a9a85630318f",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/playbooks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c6c4acace943f19a423f728fb3b7a198e4c0d16de5fd5c39dcc4e1ea42833f05",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5fb12ec2a705ea03541a5d495fdd74b57795355ccc8f119bf420d2fd6f712972",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d27d7401d8892de9d3af62c3480a002f0a7ea651a0a57790842c3f544447b0cb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "7b5d49be8f1ede2b4c37f1bc615adc0a60a717430e92655f88bd85d0beaabc87",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/docker-machine",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "be46924912c9e2ad743df5123a599c11e883e4fcf37f6521b17855c8e8548b60",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a6a2559f4b458d19217644547f8652c9f4559ee0f173c22db9d5f4b32f5f5071",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dc839d48eb9e045d698358e82410f7566fea3e83c5a74dca4b0a5b5c20604d63",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff090e5f93098c3d15ee6f8cbf9bfc35d896a430fe96ba4eab5779335778e4d0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b7766b726c1bd9f7f88d041949de164708050e7cc2391db54e760bfdf0c0a18e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b921d0f0539ea489e8328930b125ec2450c68df24888fa04c6e6a5004688fdf5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0b6cbfff0581c89d9114e38c9c9d6a5f688dbaa924e60b29e8d774eb1203c2ed",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/playbooks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "260def0e56e182996d606d1cf1844cb867a566dca8a703f2072ac7828b411845",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "68093251935f6f88a5224d35b3a27e3f7025e7e5f8e553163daf6981faf36478",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "47032a015d42f2e145e31cc49e090d009d6e936a6513488d41c2b634fa8ae067",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ce9bdef7ce1824461d19fbc4612ee107db92363f7be6471e8bbfae2761db9bc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a76f4a620dfae81104aa5ee3c8aedd5b41550acf46aa7485ccc0d0e6a8bbc5a5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d53b438fa30657a4719b2163d3e1d9ec1b4a76b857cc1d0c1fb75719de6140a1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4885e38129798ed56b4c6478dcf4021f8394b13cb3ff8105b15087427bd7db46",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/inventory_docker_swarm/runme.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b4b637a7352ac0fd0648dbced168f58e41cbc8b283d3b142575cc02dd7e84820",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "dcfce2a57ae37a11f8350da70b4cb56af42d7175b34257b93b91a1073b1e4d4a",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b3404a9480a664270525437804a3d4e92476437dcedea0c04500deb7e9447f25",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb68a78cb264725dfaa8d3647048f08b136626454fb58e349c909e13b19d4be1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/Alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "43b8d257ab837f98501833d5b91f8eee268be85accf168f3dfb9f478ac0d3af1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/Archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c16c3a0ff7c4a2e18e4c5bb19b399f5daca23c215824b24ebc0cc3bd7f186acc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ac7c57ac9681637833b5df5081ce65b8046c6fed06e10fefbd9f0288f8523fe2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d863eb3a64a49cd5f9d1967f5c6e4322df7c5ae23da2b77fc5548acc36c330a5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/RedHat-7.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "61b1cb888fba1895e31ae5aea25ae1425fc87e045809ad384c4d436a15265509",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/RedHat-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e32a46e88ab1f621858d0dfb1d4d6928fd9b497c413c2daa153a127543516a94",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/RedHat-9.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e32a46e88ab1f621858d0dfb1d4d6928fd9b497c413c2daa153a127543516a94",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a9a08463e216f4e3b18c53c3414e7f0142449f74271a3d1e8a9946e134f6be16",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8f2e31a9a81de6eb729424352002378a851066efef66e3643d6b36089fb35a5d",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1f985a9ad67fa63a915ab2c0d7c6f16eaad6f73f3ed8e769b22dd4c46023a7f6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/RedHat-7.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "646aba9f18362f886a8d806cc2129a958ba226d851d54e55b805140a8b14ab7e",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/RedHat-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a70c3f81eb70f10095a54ea59d8bd2d78fa5ed57f0c6baf7c2ea73473fad833b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/RedHat-9.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "22e0ea71c4c03de665af5fbb1f6c25f6da56b4005791f7ee6ab9d6ee6a362175",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "2b47800d1b37d41dc93ba12a5eab51ce8b4eef0560f93e1de80734b45cde9bfc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/Ubuntu-14.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "718e986033765b8744119a049b8d6c6efc048cb3eacd85edcb42c6a69e08cdcc",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/Ubuntu-22.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f3b92e7d0fc430c53df055f9a785d5c02a02a4722f2b503d1f09c721d29e8dcd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/main.env",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "341f81cf995a411aeee4799a14e238ee17d2552053de4376ceeb57363bd805b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14638ce9bffa141ed24360a6b3829bc932cc92d32534849dff7e47c8dbd4d337",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb09ffab7e455855046b8e8ef753c031bcd11a625c879517fa98335d23b15af8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5a0dcbb1be4a30a25685755533954499a8849453b5758f093fbc74c9025d1ae5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73fc081e424b642eeab13b34ec6594726fa079ea946df22fdf7e0cc75fece2d3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/tasks/Alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b71b69524c05e82aeea8ba71809e08218c45a108eb50e73bb81ddcfafbc0f48c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/tasks/Archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b70baef7cc2b4cc553b7a6dde5aba91de029be5c08c2d0373ec57a0bccacb645",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/tasks/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72be467f15cffa21fa7eefbb26646f78d49787b9d30b97d41bbc045242ecc792",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/tasks/Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f75f1446ccac863d07c5a0503bef419b8be29b5556a10b7200bdf7ac2f87d6b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/tasks/RedHat-7.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0eb71ce9c438e1425fe68baefcd0e4ef0fdcf189f7610be09a3987f136270efd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/tasks/RedHat-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf017f59cbd79a3501fa97c8a25f789dd4344a58d5cbe3a53d68390b3930c642",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/tasks/RedHat-9.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf017f59cbd79a3501fa97c8a25f789dd4344a58d5cbe3a53d68390b3930c642",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/tasks/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8eb0b2d6ae8ba75408d500d36c1bc26325d79d3277f29a35bf6e7a7dc73cbac8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14c06a6d94f6eec3b1691433c009b560cccfe8edff15164ce7076ed03f73a603",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "347de1b4ae8ddb1c9c51606f248b23047ef5bd881d662eee6bf07d4b59ab7fc3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/vars/CentOS-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f20697da978c8ed625b345961999507bdc44548fc6f322804cce12c8106148a0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/vars/RedHat-7.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f20697da978c8ed625b345961999507bdc44548fc6f322804cce12c8106148a0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/vars/RedHat-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c892ab9caff1f3abc4d33665d2dbc8d400aa5c19a8da57aa68b9d361e19a7d25",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/vars/RedHat-9.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c892ab9caff1f3abc4d33665d2dbc8d400aa5c19a8da57aa68b9d361e19a7d25",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/vars/Suse-py2.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f20697da978c8ed625b345961999507bdc44548fc6f322804cce12c8106148a0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/vars/Suse-py3.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0061c7189c312bb9d118ae0390ed09046eb9de90e29df5da0db043f571de71b5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/vars/Ubuntu-16.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f20697da978c8ed625b345961999507bdc44548fc6f322804cce12c8106148a0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/vars/Ubuntu-18.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f20697da978c8ed625b345961999507bdc44548fc6f322804cce12c8106148a0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/vars/Ubuntu.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0061c7189c312bb9d118ae0390ed09046eb9de90e29df5da0db043f571de71b5",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/defaults",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/defaults/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88ac959ec39f15f69e1541912e2a55c74ad2e6410b94b5c6f3d37e1c872af130",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "73fc081e424b642eeab13b34ec6594726fa079ea946df22fdf7e0cc75fece2d3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/tasks/Alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b71b69524c05e82aeea8ba71809e08218c45a108eb50e73bb81ddcfafbc0f48c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/tasks/Archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b70baef7cc2b4cc553b7a6dde5aba91de029be5c08c2d0373ec57a0bccacb645",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/tasks/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "72be467f15cffa21fa7eefbb26646f78d49787b9d30b97d41bbc045242ecc792",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/tasks/Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f75f1446ccac863d07c5a0503bef419b8be29b5556a10b7200bdf7ac2f87d6b0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-7.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0eb71ce9c438e1425fe68baefcd0e4ef0fdcf189f7610be09a3987f136270efd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-8.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf017f59cbd79a3501fa97c8a25f789dd4344a58d5cbe3a53d68390b3930c642",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-9.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bf017f59cbd79a3501fa97c8a25f789dd4344a58d5cbe3a53d68390b3930c642",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/tasks/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8eb0b2d6ae8ba75408d500d36c1bc26325d79d3277f29a35bf6e7a7dc73cbac8",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "19c26334b4c5f43997149e30864ab9989a16a038f09b528e0d889610c58d7fc6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "31f09b7f032289ec891a08c4df8735e387e24e779b0b6c1a330361e19dc15fac",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/vars/Alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f20697da978c8ed625b345961999507bdc44548fc6f322804cce12c8106148a0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/vars/Archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "047f7e71a9d692af77f7b4450878eda6306af058e2ab8f83a8ff005cfa0f6db6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/vars/Fedora.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f20697da978c8ed625b345961999507bdc44548fc6f322804cce12c8106148a0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_compose_v2/vars/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63a1b79555c097c3c1d8b0bdb2b84b86b91873c6d3d55b7534067c8fea6d41fb",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/files",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/files/nginx.conf",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "235e9368742b2df29f384151996978b3dc53b656b446ce7bae814b46515af48b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/files/nginx.htpasswd",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "78a467484164809e507e71864bf48871ea79946ab426ce11334c1ab138ca07df",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/handlers/cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "53b156130794d1c5df35d9e01508a3533488abfe0e5ff309a1887cc4e04478b2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "0164b1d9b906a26e940039887c4e537d33393676ce190cf09ad2096362e773de",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df14f8571467afbfe26d30dca0d552af60569eef593778ceed169823636b6df0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "69567cec31856b0adcb7f98ece5daf2828a15932550322e4ad84048138296cb1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "366fffda5da5f3ef62aa30de4b1ddf2bab5059b2c88bb8241af00d73dab06208",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/tasks/setup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "35b7f4aab9306e42354462b5a1fb9795fd00321596a26483d971e907ed1718ec",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/vars/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "14638ce9bffa141ed24360a6b3829bc932cc92d32534849dff7e47c8dbd4d337",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_docker_registry/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d1b7da6a77f85a946d3fb9c0413c8ef74fce16c441a9327fa51ffab1098589a3",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_epel",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_epel/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_epel/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6ac1f4868552f389a3fcebc82a48e1d15570cc2f927bce89ce1f959fb8272622",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb68a78cb264725dfaa8d3647048f08b136626454fb58e349c909e13b19d4be1",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "83697397e012c51c3501608eabf00058f51087ca23adf55314651ce403c2f18b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/Alpine.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3135d01f38467ef83cf1c232efc769b321421d50747f21e9d594ef01da26b14c",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/Archlinux.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ab8e80d67956c9fc23670767cfbce1fecaf7f256f76b46b923b236ecf435b1a7",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/Debian.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecf2668ce298103f8ea02d73cd442b68bf1abbf67d2357add4f67b4b23d9e5f9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/FreeBSD.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3d8a6ea8c9bd7ebb5891a190a4953644a5ab44ee380ac4c7068dfc833ad27fd",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/RedHat.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecf2668ce298103f8ea02d73cd442b68bf1abbf67d2357add4f67b4b23d9e5f9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_openssl/vars/Suse.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecf2668ce298103f8ea02d73cd442b68bf1abbf67d2357add4f67b4b23d9e5f9",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_paramiko",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_paramiko/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_paramiko/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "21ba22eb784d536b947fe4fbb008a211d03c25c77d19707a2e167b53205d0f6b",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_paramiko/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_paramiko/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "492cf351a422e1ff3d0260747979fab86803e60f23fb48491e0cb8706af484a6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_pkg_mgr/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b47d6aa6d6dbeb402deddde2a4f15ec990d6ef9901f5c610f18c352e13e0e5e6",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/meta",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/meta/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3b4106020a362e34cbc75011bfa013d65acb8224f6fbc931cfc6438b67888627",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bb8e2e8d36b9ecef1534b380c39e8402821718a0bf2246bf896d2222fa381cf0",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_constraints/aliases",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "60c77e9e9feaec01032830fa3e185152cb4aacbbf0b17279e6a9c3efb40b4835",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/handlers",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "eb71bb93b6958a495a1486357d097e5bc9ee2071a16dcc17f27780ad8af5ef77",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "54730c80c5fa787eea72dac59c88627036aeae96c48e27cbec02f9c1bd49a2d2",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "594627e64e5cb619d5d2a817b6f94c8750c61f21452ebfe3e85427d59996ec06",
+ "format": 1
+ },
+ {
+ "name": "tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "665db3263e44a68e5bd728127a0b1a6dcf93a93eb529e80900a4c4c3fa06d68e",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/extra-docs.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63af024dcda47dda7e3cead05953d33c858853d2adc046efb47af28a6a0dc96b",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/extra-docs.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/extra-docs.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "00ae2a5eeec7f3a621074473ee4d0d16f763c67e1c6c1b2f4b7dcd3ca171262c",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/licenses.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8c111eb62fa6f1b6c8a1260e9ff06c8b76ef8244428c6289969d79678093618f",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/licenses.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/licenses.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ff502f0707adf15a57b2fc48842fc47d154bfbd3aeadf4c0c05e96b0589c3cd4",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/licenses.py.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88f745b5d91e1371369c207e3392877af6f3e1de48fbaca63a728d4dcf79e03c",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/no-unwanted-files.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a3d3b17f699b042958c7cd845a9d685bc935d83062e0bcf077f2c7200e2c0bac",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/no-unwanted-files.json.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/extra/no-unwanted-files.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "416fdc4ddd0ad89ddad54bcacc8bf5790d3a9610f7c059167123046849f0ade9",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.10.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f29b7fd15a448c7e4db863939b521ba515ca347a04feec3904926f36d1df667b",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.10.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.11.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f29b7fd15a448c7e4db863939b521ba515ca347a04feec3904926f36d1df667b",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.11.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.12.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "b6df9bd553d2e95b00295bd64ea2ad10e6925108616c3ee435091d9936035937",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.12.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.13.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccc7d836b365401a29253c3db1fef85f976a8977eb57332de1136187ac45f39a",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.13.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.14.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccc7d836b365401a29253c3db1fef85f976a8977eb57332de1136187ac45f39a",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.14.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.15.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ccc7d836b365401a29253c3db1fef85f976a8977eb57332de1136187ac45f39a",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.15.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.9.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d49ed4d275b233af4655cbd623246de96dd7e5aff9356a409f4a8ea7c3cdb215",
+ "format": 1
+ },
+ {
+ "name": "tests/sanity/ignore-2.9.txt.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/__init__.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/builtins.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1a66bf5868ec79d871566ca33e62612c7467681405a2c8aef8a93a768c3deebb",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/mock.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "75bef6f2ce5456591fd26298e8181709215fa31da43c2d25ce70ae73fd6a2936",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/compat/unittest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3eac6f61233f8679a6d6732224de17066df8e6b90eeb1a465ce920809960a88b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/connection",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/connection/test_docker.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6935ce21e236c76bbca4f6013b0cda317c825e89d6e2216b995e3ff72b01155f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/inventory/test_docker_containers.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a267a4fb74b88a5069b0991f2d307234291e5e05735a4a363bde0bbdc73ac438",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/api",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/api/test_client.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cca7fe225e44648e6ecc14ee76480d0e9b07850b0df54f5ea797f0d8df0112ed",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/transport",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/transport/test_sshconn.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "63f010ba9bae645f8c95ebbcb00beb53e0303ebd59b4043b336a692fd43eff8f",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/transport/test_ssladapter.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "81cd1cd14e699a58eff8a368477dc036bc68c74f37c6e2840b29dca4bc7c5dc9",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/testdata",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/testdata/certs",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/testdata/certs/ca.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9fc3c10d22cf3dfb15727e16c71fe0de673a835dff0e711c208cf2421a3770e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/testdata/certs/cert.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9fc3c10d22cf3dfb15727e16c71fe0de673a835dff0e711c208cf2421a3770e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/testdata/certs/key.pem",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e9fc3c10d22cf3dfb15727e16c71fe0de673a835dff0e711c208cf2421a3770e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/test_build.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "958ce955e90bbec683cc490b2d9f50cb88d767ae8af6f9177923c45951472229",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/test_config.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2471729eb522d44788b670e0b2483a371965af2acf53971b23f7c1246acdfe2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/test_decorators.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c199f7436d7076adf09eb02cd9016697ad1d98d93a9805127b548feaae4a7fbf",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/test_json_stream.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9d04daafc18ea5b41a9f4b99140cf9c41fdd756fde5cb82b4800a6cd2157b48b",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/test_ports.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d881e823dbab6b2f9bd772ab5f01e367407a66a1074922c3abbd5e4a5ce738f0",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/test_proxy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bad097d44d0a44ff2023aad86161646aa28ac6e96c554d7b426204c58d3c3580",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/utils/test_utils.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "e1595599c25bbe64c07d99e1ee9d2b27effc8e950f7d7301995426febbac2927",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/fake_api.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "34922ee17f3c850bdde7f6b6910e1f465c802cae15ebae77bb5747f8f2921c57",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/fake_stat.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cb95a05c610164fdc6ccac0dcda57695d98f35a493e1d677b90b1b7d08681bc2",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/test_auth.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "86e10a21915fad656729eec85fe53df7f05d5319e331fdd6bacf8945faf45259",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/_api/test_errors.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "9f5cbcd9c32d7182cf03773dff7a84840fa6f6431d936cadd9faaac9e52076de",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test__scramble.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fe5598086a3826081072f676b57abaab9e302b056bda8b7bee6596134c5fa556",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_copy.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a56c05a45e5074482e451141bebb5e307b614686ba89731970742c4d9c9eddb1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_image_archive.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "adc3bfeabab056280ee95e87ee52627f88e43961bdcb06f1df2896d04cc619b1",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/module_utils/test_util.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "f109a556b20a1459ebe1ab4444a29b23e6d34caa3d211bbb9617d986d0033959",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/conftest.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bc5cd8bb3edd35ccd18027139e608db8c118853d44ca4435fa310092e4881f01",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_docker_image.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "88971516134166aba528faf1470a1538e2283e5bd90436a0493ced56264aac0e",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_docker_network.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "8ec6ea2325a5e752dea2ef746ae7b4baf94f851053a148cdabbe1acd98d255ff",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/modules/test_docker_swarm_service.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "5326d0c5adf3f14fbb623ac5efdb75b6f9dd7cd7e6fb5d8c8d099bc6ab58049c",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/test_support",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/unit/plugins/test_support/docker_image_archive_stubbing.py",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6efb38e3bdb69886aaf78359644361dade023060bc149c6ca7c67252792d4b56",
+ "format": 1
+ },
+ {
+ "name": "tests/unit/requirements.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "90372c8f9d83af7a3af63dcf3b9027930f6cb96a8aba721f016c48ab10aa585c",
+ "format": 1
+ },
+ {
+ "name": "tests/utils",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable",
+ "ftype": "dir",
+ "chksum_type": null,
+ "chksum_sha256": null,
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/alpine.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40bb478b59cd303fa65577f570d7392aede4cb794464e039b2b1454f08a4cdba",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/fedora.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40bb478b59cd303fa65577f570d7392aede4cb794464e039b2b1454f08a4cdba",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/rhel.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40bb478b59cd303fa65577f570d7392aede4cb794464e039b2b1454f08a4cdba",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/ubuntu.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40bb478b59cd303fa65577f570d7392aede4cb794464e039b2b1454f08a4cdba",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/linux-community.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "fd0e2613b086fb40cf1d5d1fcccc0b0c921c9b5bbac8b52ae57da460a1091534",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/linux.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "df97f7d9e7e231e7772cfb3d62a0c00f5e7523852d121e0d20492c1e9b5c92ad",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/remote.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "40bb478b59cd303fa65577f570d7392aede4cb794464e039b2b1454f08a4cdba",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/sanity.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "a5c298630e328b6de875b0db8a48db50592ca493f93004d7975cbfcb119a964f",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/shippable.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "59171fc13d0e2c92443c4e9d807a0c5b4143f59120c4e2bb1415a35474233cec",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/shippable/units.sh",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "1658b58109191789380e35d69fbfcb993c2cbf359c633ea1ecbe22aed8bf5056",
+ "format": 1
+ },
+ {
+ "name": "tests/utils/constraints.txt",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "c37e28740fda4f82ad9df0f481e977c11dd9b2c950b7faf74eb7d96429b8e70c",
+ "format": 1
+ },
+ {
+ "name": "tests/config.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "4a6a1ad768079ef3a02b727a199898f8465358c0c0bd3359fe4a59f57b537bb3",
+ "format": 1
+ },
+ {
+ "name": "tests/requirements.yml",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "ecdfa05f94a15039de65c5096a251d7c24e3cafbea97a9b213d825844aa90a44",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "cac6f280ec31250f52a98235789b46c9f94c2b230944ce7180047496c44a8564",
+ "format": 1
+ },
+ {
+ "name": "CHANGELOG.rst.license",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "6eb915239f9f35407fa68fdc41ed6522f1fdcce11badbdcd6057548023179ac1",
+ "format": 1
+ },
+ {
+ "name": "COPYING",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "3972dc9744f6499f0f9b2dbf76696f2ae7ad8af9b23dde66d6af86c9dfb36986",
+ "format": 1
+ },
+ {
+ "name": "README.md",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "d2110bcce37881a4a9a62c36821e188e5173be767b8b3ea483b82257b05006db",
+ "format": 1
+ }
+ ],
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/community/docker/LICENSES/Apache-2.0.txt b/ansible_collections/community/docker/LICENSES/Apache-2.0.txt
new file mode 100644
index 00000000..75191a4d
--- /dev/null
+++ b/ansible_collections/community/docker/LICENSES/Apache-2.0.txt
@@ -0,0 +1,191 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Copyright 2016 Docker, Inc.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/ansible_collections/community/docker/LICENSES/GPL-3.0-or-later.txt b/ansible_collections/community/docker/LICENSES/GPL-3.0-or-later.txt
new file mode 100644
index 00000000..f288702d
--- /dev/null
+++ b/ansible_collections/community/docker/LICENSES/GPL-3.0-or-later.txt
@@ -0,0 +1,674 @@
+ GNU GENERAL PUBLIC LICENSE
+ Version 3, 29 June 2007
+
+ Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+ Preamble
+
+ The GNU General Public License is a free, copyleft license for
+software and other kinds of works.
+
+ The licenses for most software and other practical works are designed
+to take away your freedom to share and change the works. By contrast,
+the GNU General Public License is intended to guarantee your freedom to
+share and change all versions of a program--to make sure it remains free
+software for all its users. We, the Free Software Foundation, use the
+GNU General Public License for most of our software; it applies also to
+any other work released this way by its authors. You can apply it to
+your programs, too.
+
+ When we speak of free software, we are referring to freedom, not
+price. Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+them if you wish), that you receive source code or can get it if you
+want it, that you can change the software or use pieces of it in new
+free programs, and that you know you can do these things.
+
+ To protect your rights, we need to prevent others from denying you
+these rights or asking you to surrender the rights. Therefore, you have
+certain responsibilities if you distribute copies of the software, or if
+you modify it: responsibilities to respect the freedom of others.
+
+ For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must pass on to the recipients the same
+freedoms that you received. You must make sure that they, too, receive
+or can get the source code. And you must show them these terms so they
+know their rights.
+
+ Developers that use the GNU GPL protect your rights with two steps:
+(1) assert copyright on the software, and (2) offer you this License
+giving you legal permission to copy, distribute and/or modify it.
+
+ For the developers' and authors' protection, the GPL clearly explains
+that there is no warranty for this free software. For both users' and
+authors' sake, the GPL requires that modified versions be marked as
+changed, so that their problems will not be attributed erroneously to
+authors of previous versions.
+
+ Some devices are designed to deny users access to install or run
+modified versions of the software inside them, although the manufacturer
+can do so. This is fundamentally incompatible with the aim of
+protecting users' freedom to change the software. The systematic
+pattern of such abuse occurs in the area of products for individuals to
+use, which is precisely where it is most unacceptable. Therefore, we
+have designed this version of the GPL to prohibit the practice for those
+products. If such problems arise substantially in other domains, we
+stand ready to extend this provision to those domains in future versions
+of the GPL, as needed to protect the freedom of users.
+
+ Finally, every program is threatened constantly by software patents.
+States should not allow patents to restrict development and use of
+software on general-purpose computers, but in those that do, we wish to
+avoid the special danger that patents applied to a free program could
+make it effectively proprietary. To prevent this, the GPL assures that
+patents cannot be used to render the program non-free.
+
+ The precise terms and conditions for copying, distribution and
+modification follow.
+
+ TERMS AND CONDITIONS
+
+ 0. Definitions.
+
+ "This License" refers to version 3 of the GNU General Public License.
+
+ "Copyright" also means copyright-like laws that apply to other kinds of
+works, such as semiconductor masks.
+
+ "The Program" refers to any copyrightable work licensed under this
+License. Each licensee is addressed as "you". "Licensees" and
+"recipients" may be individuals or organizations.
+
+ To "modify" a work means to copy from or adapt all or part of the work
+in a fashion requiring copyright permission, other than the making of an
+exact copy. The resulting work is called a "modified version" of the
+earlier work or a work "based on" the earlier work.
+
+ A "covered work" means either the unmodified Program or a work based
+on the Program.
+
+ To "propagate" a work means to do anything with it that, without
+permission, would make you directly or secondarily liable for
+infringement under applicable copyright law, except executing it on a
+computer or modifying a private copy. Propagation includes copying,
+distribution (with or without modification), making available to the
+public, and in some countries other activities as well.
+
+ To "convey" a work means any kind of propagation that enables other
+parties to make or receive copies. Mere interaction with a user through
+a computer network, with no transfer of a copy, is not conveying.
+
+ An interactive user interface displays "Appropriate Legal Notices"
+to the extent that it includes a convenient and prominently visible
+feature that (1) displays an appropriate copyright notice, and (2)
+tells the user that there is no warranty for the work (except to the
+extent that warranties are provided), that licensees may convey the
+work under this License, and how to view a copy of this License. If
+the interface presents a list of user commands or options, such as a
+menu, a prominent item in the list meets this criterion.
+
+ 1. Source Code.
+
+ The "source code" for a work means the preferred form of the work
+for making modifications to it. "Object code" means any non-source
+form of a work.
+
+ A "Standard Interface" means an interface that either is an official
+standard defined by a recognized standards body, or, in the case of
+interfaces specified for a particular programming language, one that
+is widely used among developers working in that language.
+
+ The "System Libraries" of an executable work include anything, other
+than the work as a whole, that (a) is included in the normal form of
+packaging a Major Component, but which is not part of that Major
+Component, and (b) serves only to enable use of the work with that
+Major Component, or to implement a Standard Interface for which an
+implementation is available to the public in source code form. A
+"Major Component", in this context, means a major essential component
+(kernel, window system, and so on) of the specific operating system
+(if any) on which the executable work runs, or a compiler used to
+produce the work, or an object code interpreter used to run it.
+
+ The "Corresponding Source" for a work in object code form means all
+the source code needed to generate, install, and (for an executable
+work) run the object code and to modify the work, including scripts to
+control those activities. However, it does not include the work's
+System Libraries, or general-purpose tools or generally available free
+programs which are used unmodified in performing those activities but
+which are not part of the work. For example, Corresponding Source
+includes interface definition files associated with source files for
+the work, and the source code for shared libraries and dynamically
+linked subprograms that the work is specifically designed to require,
+such as by intimate data communication or control flow between those
+subprograms and other parts of the work.
+
+ The Corresponding Source need not include anything that users
+can regenerate automatically from other parts of the Corresponding
+Source.
+
+ The Corresponding Source for a work in source code form is that
+same work.
+
+ 2. Basic Permissions.
+
+ All rights granted under this License are granted for the term of
+copyright on the Program, and are irrevocable provided the stated
+conditions are met. This License explicitly affirms your unlimited
+permission to run the unmodified Program. The output from running a
+covered work is covered by this License only if the output, given its
+content, constitutes a covered work. This License acknowledges your
+rights of fair use or other equivalent, as provided by copyright law.
+
+ You may make, run and propagate covered works that you do not
+convey, without conditions so long as your license otherwise remains
+in force. You may convey covered works to others for the sole purpose
+of having them make modifications exclusively for you, or provide you
+with facilities for running those works, provided that you comply with
+the terms of this License in conveying all material for which you do
+not control copyright. Those thus making or running the covered works
+for you must do so exclusively on your behalf, under your direction
+and control, on terms that prohibit them from making any copies of
+your copyrighted material outside their relationship with you.
+
+ Conveying under any other circumstances is permitted solely under
+the conditions stated below. Sublicensing is not allowed; section 10
+makes it unnecessary.
+
+ 3. Protecting Users' Legal Rights From Anti-Circumvention Law.
+
+ No covered work shall be deemed part of an effective technological
+measure under any applicable law fulfilling obligations under article
+11 of the WIPO copyright treaty adopted on 20 December 1996, or
+similar laws prohibiting or restricting circumvention of such
+measures.
+
+ When you convey a covered work, you waive any legal power to forbid
+circumvention of technological measures to the extent such circumvention
+is effected by exercising rights under this License with respect to
+the covered work, and you disclaim any intention to limit operation or
+modification of the work as a means of enforcing, against the work's
+users, your or third parties' legal rights to forbid circumvention of
+technological measures.
+
+ 4. Conveying Verbatim Copies.
+
+ You may convey verbatim copies of the Program's source code as you
+receive it, in any medium, provided that you conspicuously and
+appropriately publish on each copy an appropriate copyright notice;
+keep intact all notices stating that this License and any
+non-permissive terms added in accord with section 7 apply to the code;
+keep intact all notices of the absence of any warranty; and give all
+recipients a copy of this License along with the Program.
+
+ You may charge any price or no price for each copy that you convey,
+and you may offer support or warranty protection for a fee.
+
+ 5. Conveying Modified Source Versions.
+
+ You may convey a work based on the Program, or the modifications to
+produce it from the Program, in the form of source code under the
+terms of section 4, provided that you also meet all of these conditions:
+
+ a) The work must carry prominent notices stating that you modified
+ it, and giving a relevant date.
+
+ b) The work must carry prominent notices stating that it is
+ released under this License and any conditions added under section
+ 7. This requirement modifies the requirement in section 4 to
+ "keep intact all notices".
+
+ c) You must license the entire work, as a whole, under this
+ License to anyone who comes into possession of a copy. This
+ License will therefore apply, along with any applicable section 7
+ additional terms, to the whole of the work, and all its parts,
+ regardless of how they are packaged. This License gives no
+ permission to license the work in any other way, but it does not
+ invalidate such permission if you have separately received it.
+
+ d) If the work has interactive user interfaces, each must display
+ Appropriate Legal Notices; however, if the Program has interactive
+ interfaces that do not display Appropriate Legal Notices, your
+ work need not make them do so.
+
+ A compilation of a covered work with other separate and independent
+works, which are not by their nature extensions of the covered work,
+and which are not combined with it such as to form a larger program,
+in or on a volume of a storage or distribution medium, is called an
+"aggregate" if the compilation and its resulting copyright are not
+used to limit the access or legal rights of the compilation's users
+beyond what the individual works permit. Inclusion of a covered work
+in an aggregate does not cause this License to apply to the other
+parts of the aggregate.
+
+ 6. Conveying Non-Source Forms.
+
+ You may convey a covered work in object code form under the terms
+of sections 4 and 5, provided that you also convey the
+machine-readable Corresponding Source under the terms of this License,
+in one of these ways:
+
+ a) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by the
+ Corresponding Source fixed on a durable physical medium
+ customarily used for software interchange.
+
+ b) Convey the object code in, or embodied in, a physical product
+ (including a physical distribution medium), accompanied by a
+ written offer, valid for at least three years and valid for as
+ long as you offer spare parts or customer support for that product
+ model, to give anyone who possesses the object code either (1) a
+ copy of the Corresponding Source for all the software in the
+ product that is covered by this License, on a durable physical
+ medium customarily used for software interchange, for a price no
+ more than your reasonable cost of physically performing this
+ conveying of source, or (2) access to copy the
+ Corresponding Source from a network server at no charge.
+
+ c) Convey individual copies of the object code with a copy of the
+ written offer to provide the Corresponding Source. This
+ alternative is allowed only occasionally and noncommercially, and
+ only if you received the object code with such an offer, in accord
+ with subsection 6b.
+
+ d) Convey the object code by offering access from a designated
+ place (gratis or for a charge), and offer equivalent access to the
+ Corresponding Source in the same way through the same place at no
+ further charge. You need not require recipients to copy the
+ Corresponding Source along with the object code. If the place to
+ copy the object code is a network server, the Corresponding Source
+ may be on a different server (operated by you or a third party)
+ that supports equivalent copying facilities, provided you maintain
+ clear directions next to the object code saying where to find the
+ Corresponding Source. Regardless of what server hosts the
+ Corresponding Source, you remain obligated to ensure that it is
+ available for as long as needed to satisfy these requirements.
+
+ e) Convey the object code using peer-to-peer transmission, provided
+ you inform other peers where the object code and Corresponding
+ Source of the work are being offered to the general public at no
+ charge under subsection 6d.
+
+ A separable portion of the object code, whose source code is excluded
+from the Corresponding Source as a System Library, need not be
+included in conveying the object code work.
+
+ A "User Product" is either (1) a "consumer product", which means any
+tangible personal property which is normally used for personal, family,
+or household purposes, or (2) anything designed or sold for incorporation
+into a dwelling. In determining whether a product is a consumer product,
+doubtful cases shall be resolved in favor of coverage. For a particular
+product received by a particular user, "normally used" refers to a
+typical or common use of that class of product, regardless of the status
+of the particular user or of the way in which the particular user
+actually uses, or expects or is expected to use, the product. A product
+is a consumer product regardless of whether the product has substantial
+commercial, industrial or non-consumer uses, unless such uses represent
+the only significant mode of use of the product.
+
+ "Installation Information" for a User Product means any methods,
+procedures, authorization keys, or other information required to install
+and execute modified versions of a covered work in that User Product from
+a modified version of its Corresponding Source. The information must
+suffice to ensure that the continued functioning of the modified object
+code is in no case prevented or interfered with solely because
+modification has been made.
+
+ If you convey an object code work under this section in, or with, or
+specifically for use in, a User Product, and the conveying occurs as
+part of a transaction in which the right of possession and use of the
+User Product is transferred to the recipient in perpetuity or for a
+fixed term (regardless of how the transaction is characterized), the
+Corresponding Source conveyed under this section must be accompanied
+by the Installation Information. But this requirement does not apply
+if neither you nor any third party retains the ability to install
+modified object code on the User Product (for example, the work has
+been installed in ROM).
+
+ The requirement to provide Installation Information does not include a
+requirement to continue to provide support service, warranty, or updates
+for a work that has been modified or installed by the recipient, or for
+the User Product in which it has been modified or installed. Access to a
+network may be denied when the modification itself materially and
+adversely affects the operation of the network or violates the rules and
+protocols for communication across the network.
+
+ Corresponding Source conveyed, and Installation Information provided,
+in accord with this section must be in a format that is publicly
+documented (and with an implementation available to the public in
+source code form), and must require no special password or key for
+unpacking, reading or copying.
+
+ 7. Additional Terms.
+
+ "Additional permissions" are terms that supplement the terms of this
+License by making exceptions from one or more of its conditions.
+Additional permissions that are applicable to the entire Program shall
+be treated as though they were included in this License, to the extent
+that they are valid under applicable law. If additional permissions
+apply only to part of the Program, that part may be used separately
+under those permissions, but the entire Program remains governed by
+this License without regard to the additional permissions.
+
+ When you convey a copy of a covered work, you may at your option
+remove any additional permissions from that copy, or from any part of
+it. (Additional permissions may be written to require their own
+removal in certain cases when you modify the work.) You may place
+additional permissions on material, added by you to a covered work,
+for which you have or can give appropriate copyright permission.
+
+ Notwithstanding any other provision of this License, for material you
+add to a covered work, you may (if authorized by the copyright holders of
+that material) supplement the terms of this License with terms:
+
+ a) Disclaiming warranty or limiting liability differently from the
+ terms of sections 15 and 16 of this License; or
+
+ b) Requiring preservation of specified reasonable legal notices or
+ author attributions in that material or in the Appropriate Legal
+ Notices displayed by works containing it; or
+
+ c) Prohibiting misrepresentation of the origin of that material, or
+ requiring that modified versions of such material be marked in
+ reasonable ways as different from the original version; or
+
+ d) Limiting the use for publicity purposes of names of licensors or
+ authors of the material; or
+
+ e) Declining to grant rights under trademark law for use of some
+ trade names, trademarks, or service marks; or
+
+ f) Requiring indemnification of licensors and authors of that
+ material by anyone who conveys the material (or modified versions of
+ it) with contractual assumptions of liability to the recipient, for
+ any liability that these contractual assumptions directly impose on
+ those licensors and authors.
+
+ All other non-permissive additional terms are considered "further
+restrictions" within the meaning of section 10. If the Program as you
+received it, or any part of it, contains a notice stating that it is
+governed by this License along with a term that is a further
+restriction, you may remove that term. If a license document contains
+a further restriction but permits relicensing or conveying under this
+License, you may add to a covered work material governed by the terms
+of that license document, provided that the further restriction does
+not survive such relicensing or conveying.
+
+ If you add terms to a covered work in accord with this section, you
+must place, in the relevant source files, a statement of the
+additional terms that apply to those files, or a notice indicating
+where to find the applicable terms.
+
+ Additional terms, permissive or non-permissive, may be stated in the
+form of a separately written license, or stated as exceptions;
+the above requirements apply either way.
+
+ 8. Termination.
+
+ You may not propagate or modify a covered work except as expressly
+provided under this License. Any attempt otherwise to propagate or
+modify it is void, and will automatically terminate your rights under
+this License (including any patent licenses granted under the third
+paragraph of section 11).
+
+ However, if you cease all violation of this License, then your
+license from a particular copyright holder is reinstated (a)
+provisionally, unless and until the copyright holder explicitly and
+finally terminates your license, and (b) permanently, if the copyright
+holder fails to notify you of the violation by some reasonable means
+prior to 60 days after the cessation.
+
+ Moreover, your license from a particular copyright holder is
+reinstated permanently if the copyright holder notifies you of the
+violation by some reasonable means, this is the first time you have
+received notice of violation of this License (for any work) from that
+copyright holder, and you cure the violation prior to 30 days after
+your receipt of the notice.
+
+ Termination of your rights under this section does not terminate the
+licenses of parties who have received copies or rights from you under
+this License. If your rights have been terminated and not permanently
+reinstated, you do not qualify to receive new licenses for the same
+material under section 10.
+
+ 9. Acceptance Not Required for Having Copies.
+
+ You are not required to accept this License in order to receive or
+run a copy of the Program. Ancillary propagation of a covered work
+occurring solely as a consequence of using peer-to-peer transmission
+to receive a copy likewise does not require acceptance. However,
+nothing other than this License grants you permission to propagate or
+modify any covered work. These actions infringe copyright if you do
+not accept this License. Therefore, by modifying or propagating a
+covered work, you indicate your acceptance of this License to do so.
+
+ 10. Automatic Licensing of Downstream Recipients.
+
+ Each time you convey a covered work, the recipient automatically
+receives a license from the original licensors, to run, modify and
+propagate that work, subject to this License. You are not responsible
+for enforcing compliance by third parties with this License.
+
+ An "entity transaction" is a transaction transferring control of an
+organization, or substantially all assets of one, or subdividing an
+organization, or merging organizations. If propagation of a covered
+work results from an entity transaction, each party to that
+transaction who receives a copy of the work also receives whatever
+licenses to the work the party's predecessor in interest had or could
+give under the previous paragraph, plus a right to possession of the
+Corresponding Source of the work from the predecessor in interest, if
+the predecessor has it or can get it with reasonable efforts.
+
+ You may not impose any further restrictions on the exercise of the
+rights granted or affirmed under this License. For example, you may
+not impose a license fee, royalty, or other charge for exercise of
+rights granted under this License, and you may not initiate litigation
+(including a cross-claim or counterclaim in a lawsuit) alleging that
+any patent claim is infringed by making, using, selling, offering for
+sale, or importing the Program or any portion of it.
+
+ 11. Patents.
+
+ A "contributor" is a copyright holder who authorizes use under this
+License of the Program or a work on which the Program is based. The
+work thus licensed is called the contributor's "contributor version".
+
+ A contributor's "essential patent claims" are all patent claims
+owned or controlled by the contributor, whether already acquired or
+hereafter acquired, that would be infringed by some manner, permitted
+by this License, of making, using, or selling its contributor version,
+but do not include claims that would be infringed only as a
+consequence of further modification of the contributor version. For
+purposes of this definition, "control" includes the right to grant
+patent sublicenses in a manner consistent with the requirements of
+this License.
+
+ Each contributor grants you a non-exclusive, worldwide, royalty-free
+patent license under the contributor's essential patent claims, to
+make, use, sell, offer for sale, import and otherwise run, modify and
+propagate the contents of its contributor version.
+
+ In the following three paragraphs, a "patent license" is any express
+agreement or commitment, however denominated, not to enforce a patent
+(such as an express permission to practice a patent or covenant not to
+sue for patent infringement). To "grant" such a patent license to a
+party means to make such an agreement or commitment not to enforce a
+patent against the party.
+
+ If you convey a covered work, knowingly relying on a patent license,
+and the Corresponding Source of the work is not available for anyone
+to copy, free of charge and under the terms of this License, through a
+publicly available network server or other readily accessible means,
+then you must either (1) cause the Corresponding Source to be so
+available, or (2) arrange to deprive yourself of the benefit of the
+patent license for this particular work, or (3) arrange, in a manner
+consistent with the requirements of this License, to extend the patent
+license to downstream recipients. "Knowingly relying" means you have
+actual knowledge that, but for the patent license, your conveying the
+covered work in a country, or your recipient's use of the covered work
+in a country, would infringe one or more identifiable patents in that
+country that you have reason to believe are valid.
+
+ If, pursuant to or in connection with a single transaction or
+arrangement, you convey, or propagate by procuring conveyance of, a
+covered work, and grant a patent license to some of the parties
+receiving the covered work authorizing them to use, propagate, modify
+or convey a specific copy of the covered work, then the patent license
+you grant is automatically extended to all recipients of the covered
+work and works based on it.
+
+ A patent license is "discriminatory" if it does not include within
+the scope of its coverage, prohibits the exercise of, or is
+conditioned on the non-exercise of one or more of the rights that are
+specifically granted under this License. You may not convey a covered
+work if you are a party to an arrangement with a third party that is
+in the business of distributing software, under which you make payment
+to the third party based on the extent of your activity of conveying
+the work, and under which the third party grants, to any of the
+parties who would receive the covered work from you, a discriminatory
+patent license (a) in connection with copies of the covered work
+conveyed by you (or copies made from those copies), or (b) primarily
+for and in connection with specific products or compilations that
+contain the covered work, unless you entered into that arrangement,
+or that patent license was granted, prior to 28 March 2007.
+
+ Nothing in this License shall be construed as excluding or limiting
+any implied license or other defenses to infringement that may
+otherwise be available to you under applicable patent law.
+
+ 12. No Surrender of Others' Freedom.
+
+ If conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot convey a
+covered work so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you may
+not convey it at all. For example, if you agree to terms that obligate you
+to collect a royalty for further conveying from those to whom you convey
+the Program, the only way you could satisfy both those terms and this
+License would be to refrain entirely from conveying the Program.
+
+ 13. Use with the GNU Affero General Public License.
+
+ Notwithstanding any other provision of this License, you have
+permission to link or combine any covered work with a work licensed
+under version 3 of the GNU Affero General Public License into a single
+combined work, and to convey the resulting work. The terms of this
+License will continue to apply to the part which is the covered work,
+but the special requirements of the GNU Affero General Public License,
+section 13, concerning interaction through a network will apply to the
+combination as such.
+
+ 14. Revised Versions of this License.
+
+ The Free Software Foundation may publish revised and/or new versions of
+the GNU General Public License from time to time. Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+ Each version is given a distinguishing version number. If the
+Program specifies that a certain numbered version of the GNU General
+Public License "or any later version" applies to it, you have the
+option of following the terms and conditions either of that numbered
+version or of any later version published by the Free Software
+Foundation. If the Program does not specify a version number of the
+GNU General Public License, you may choose any version ever published
+by the Free Software Foundation.
+
+ If the Program specifies that a proxy can decide which future
+versions of the GNU General Public License can be used, that proxy's
+public statement of acceptance of a version permanently authorizes you
+to choose that version for the Program.
+
+ Later license versions may give you additional or different
+permissions. However, no additional obligations are imposed on any
+author or copyright holder as a result of your choosing to follow a
+later version.
+
+ 15. Disclaimer of Warranty.
+
+ THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
+APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
+HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
+OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
+THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
+IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
+ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. Limitation of Liability.
+
+ IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
+THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
+GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
+USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
+DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
+PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
+EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
+SUCH DAMAGES.
+
+ 17. Interpretation of Sections 15 and 16.
+
+ If the disclaimer of warranty and limitation of liability provided
+above cannot be given local legal effect according to their terms,
+reviewing courts shall apply local law that most closely approximates
+an absolute waiver of all civil liability in connection with the
+Program, unless a warranty or assumption of liability accompanies a
+copy of the Program in return for a fee.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Programs
+
+ If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+ To do so, attach the following notices to the program. It is safest
+to attach them to the start of each source file to most effectively
+state the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the program's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This program is free software: you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation, either version 3 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+Also add information on how to contact you by electronic and paper mail.
+
+ If the program does terminal interaction, make it output a short
+notice like this when it starts in an interactive mode:
+
+ <program> Copyright (C) <year> <name of author>
+ This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+ This is free software, and you are welcome to redistribute it
+ under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License. Of course, your program's commands
+might be different; for a GUI interface, you would use an "about box".
+
+ You should also get your employer (if you work as a programmer) or school,
+if any, to sign a "copyright disclaimer" for the program, if necessary.
+For more information on this, and how to apply and follow the GNU GPL, see
+<https://www.gnu.org/licenses/>.
+
+ The GNU General Public License does not permit incorporating your program
+into proprietary programs. If your program is a subroutine library, you
+may consider it more useful to permit linking proprietary applications with
+the library. If this is what you want to do, use the GNU Lesser General
+Public License instead of this License. But first, please read
+<https://www.gnu.org/licenses/why-not-lgpl.html>.
diff --git a/ansible_collections/community/docker/MANIFEST.json b/ansible_collections/community/docker/MANIFEST.json
new file mode 100644
index 00000000..9ef20423
--- /dev/null
+++ b/ansible_collections/community/docker/MANIFEST.json
@@ -0,0 +1,33 @@
+{
+ "collection_info": {
+ "namespace": "community",
+ "name": "docker",
+ "version": "3.4.2",
+ "authors": [
+ "Ansible Docker Working Group"
+ ],
+ "readme": "README.md",
+ "tags": [
+ "docker"
+ ],
+ "description": "Modules and plugins for working with Docker",
+ "license": [
+ "GPL-3.0-or-later",
+ "Apache-2.0"
+ ],
+ "license_file": null,
+ "dependencies": {},
+ "repository": "https://github.com/ansible-collections/community.docker",
+ "documentation": null,
+ "homepage": "https://github.com/ansible-collections/community.docker",
+ "issues": "https://github.com/ansible-collections/community.docker/issues"
+ },
+ "file_manifest_file": {
+ "name": "FILES.json",
+ "ftype": "file",
+ "chksum_type": "sha256",
+ "chksum_sha256": "bd2d444ed37264d245ec8a523ee5be66695f3bed47138a20a71a0d30578e0834",
+ "format": 1
+ },
+ "format": 1
+} \ No newline at end of file
diff --git a/ansible_collections/community/docker/README.md b/ansible_collections/community/docker/README.md
new file mode 100644
index 00000000..e82e0a8e
--- /dev/null
+++ b/ansible_collections/community/docker/README.md
@@ -0,0 +1,135 @@
+<!--
+Copyright (c) Ansible Project
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+-->
+
+# Docker Community Collection
+
+[![Doc](https://img.shields.io/badge/docs-brightgreen.svg)](https://docs.ansible.com/ansible/latest/collections/community/docker/)
+[![Build Status](https://dev.azure.com/ansible/community.docker/_apis/build/status/CI?branchName=main)](https://dev.azure.com/ansible/community.docker/_build?definitionId=25)
+[![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.docker)](https://codecov.io/gh/ansible-collections/community.docker)
+
+This repo contains the `community.docker` Ansible Collection. The collection includes many modules and plugins to work with Docker.
+
+Please note that this collection does **not** support Windows targets. The connection plugins included in this collection support Windows targets on a best-effort basis, but we are not testing this in CI.
+
+## Tested with Ansible
+
+Tested with the current ansible-core 2.11, ansible-core 2.12, ansible-core 2.13, and ansible-core 2.14 releases, and the current development version of ansible-core. Ansible/ansible-base versions before 2.11.0 are not supported.
+
+Please note that Ansible 2.9 and ansible-base 2.10 are no longer supported. If you need to use them, use community.docker 2.x.y. Also note that this collection does not work with ansible-core 2.11 (this includes ansible-base and Ansible 2.9) on Python 3.12+.
+
+## External requirements
+
+Some modules and plugins require Docker CLI, or other external, programs. Some require the [Docker SDK for Python](https://pypi.org/project/docker/) and some use [requests](https://pypi.org/project/requests/) to directly communicate with the Docker daemon API. All modules and plugins require Python 2.7 or later. Python 2.6 is no longer supported; use community.docker 2.x.y if you need to use Python 2.6.
+
+Installing the Docker SDK for Python also installs the requirements for the modules and plugins that use `requests`. If you want to directly install the Python libraries instead of the SDK, you need the following ones:
+
+- [requests](https://pypi.org/project/requests/);
+- [pywin32](https://pypi.org/project/pywin32/) when using named pipes on Windows with the Windows 32 API;
+- [paramiko](https://pypi.org/project/paramiko/) when using SSH to connect to the Docker daemon with `use_ssh_client=false`;
+- [pyOpenSSL](https://pypi.org/project/pyOpenSSL/) when using TLS to connect to the Docker daemon;
+- [backports.ssl_match_hostname](https://pypi.org/project/backports.ssl_match_hostname/) when using TLS to connect to the Docker daemon on Python 2.
+
+If you have Docker SDK for Python < 2.0.0 installed ([docker-py](https://pypi.org/project/docker-py/)), you can still use it for modules that support it, though we recommend to uninstall it and then install [docker](https://pypi.org/project/docker/), the Docker SDK for Python >= 2.0.0. Note that both libraries cannot be installed at the same time. If you accidentally did install them simultaneously, you have to uninstall *both* before re-installing one of them.
+
+## Collection Documentation
+
+Browsing the [**latest** collection documentation](https://docs.ansible.com/ansible/latest/collections/community/docker) will show docs for the _latest version released in the Ansible package_, not the latest version of the collection released on Galaxy.
+
+Browsing the [**devel** collection documentation](https://docs.ansible.com/ansible/devel/collections/community/docker) shows docs for the _latest version released on Galaxy_.
+
+We also separately publish [**latest commit** collection documentation](https://ansible-collections.github.io/community.docker/branch/main/) which shows docs for the _latest commit in the `main` branch_.
+
+If you use the Ansible package and do not update collections independently, use **latest**. If you install or update this collection directly from Galaxy, use **devel**. If you are looking to contribute, use **latest commit**.
+
+## Included content
+
+* Connection plugins:
+ - community.docker.docker: use Docker containers as remotes using the Docker CLI program
+ - community.docker.docker_api: use Docker containers as remotes using the Docker API
+ - community.docker.nsenter: execute commands on the host running the controller container
+* Inventory plugins:
+ - community.docker.docker_containers: dynamic inventory plugin for Docker containers
+ - community.docker.docker_machine: collect Docker machines as inventory
+ - community.docker.docker_swarm: collect Docker Swarm nodes as inventory
+* Modules:
+ * Docker:
+ - community.docker.docker_container: manage Docker containers
+ - community.docker.docker_container_copy_into: copy a file into a Docker container
+ - community.docker.docker_container_exec: run commands in Docker containers
+ - community.docker.docker_container_info: retrieve information on Docker containers
+ - community.docker.docker_host_info: retrieve information on the Docker daemon
+ - community.docker.docker_image: manage Docker images
+ - community.docker.docker_image_info: retrieve information on Docker images
+ - community.docker.docker_image_load: load Docker images from archives
+ - community.docker.docker_login: log in and out to/from registries
+ - community.docker.docker_network: manage Docker networks
+ - community.docker.docker_network_info: retrieve information on Docker networks
+ - community.docker.docker_plugin: manage Docker plugins
+ - community.docker.docker_prune: prune Docker containers, images, networks, volumes, and build data
+ - community.docker.docker_volume: manage Docker volumes
+ - community.docker.docker_volume_info: retrieve information on Docker volumes
+ * Docker Compose:
+ - community.docker.docker_compose: manage Docker Compose files
+ * Docker Swarm:
+ - community.docker.docker_config: manage configurations
+ - community.docker.docker_node: manage Docker Swarm nodes
+ - community.docker.docker_node_info: retrieve information on Docker Swarm nodes
+ - community.docker.docker_secret: manage secrets
+ - community.docker.docker_swarm: manage Docker Swarm
+ - community.docker.docker_swarm_info: retrieve information on Docker Swarm
+ - community.docker.docker_swarm_service: manage Docker Swarm services
+ - community.docker.docker_swarm_service_info: retrieve information on Docker Swarm services
+ * Docker Stack:
+ - community.docker.docker_stack: manage Docker Stacks
+ - community.docker.docker_stack_info: retrieve information on Docker Stacks
+ - community.docker.docker_stack_task_info: retrieve information on tasks in Docker Stacks
+ * Other:
+ - current_container_facts: return facts about whether the module runs in a Docker container
+
+## Using this collection
+
+Before using the Docker community collection, you need to install the collection with the `ansible-galaxy` CLI:
+
+ ansible-galaxy collection install community.docker
+
+You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml` using the format:
+
+```yaml
+collections:
+- name: community.docker
+```
+
+See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details.
+
+## Contributing to this collection
+
+If you want to develop new content for this collection or improve what is already here, the easiest way to work on the collection is to clone it into one of the configured [`COLLECTIONS_PATH`](https://docs.ansible.com/ansible/latest/reference_appendices/config.html#collections-paths), and work on it there.
+
+You can find more information in the [developer guide for collections](https://docs.ansible.com/ansible/devel/dev_guide/developing_collections.html#contributing-to-collections), and in the [Ansible Community Guide](https://docs.ansible.com/ansible/latest/community/index.html).
+
+## Release notes
+
+See the [changelog](https://github.com/ansible-collections/community.docker/tree/main/CHANGELOG.rst).
+
+## More information
+
+- [Ansible Collection overview](https://github.com/ansible-collections/overview)
+- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html)
+- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html)
+- [Ansible Collections Checklist](https://github.com/ansible-collections/overview/blob/master/collection_requirements.rst)
+- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html)
+- [The Bullhorn (the Ansible Contributor newsletter)](https://us19.campaign-archive.com/home/?u=56d874e027110e35dea0e03c1&id=d6635f5420)
+- [Changes impacting Contributors](https://github.com/ansible-collections/overview/issues/45)
+
+## Licensing
+
+This collection is primarily licensed and distributed as a whole under the GNU General Public License v3.0 or later.
+
+See [LICENSES/GPL-3.0-or-later.txt](https://github.com/ansible-collections/community.docker/blob/main/COPYING) for the full text.
+
+Parts of the collection are licensed under the [Apache 2.0 license](https://github.com/ansible-collections/community.docker/blob/main/LICENSES/Apache-2.0.txt). This mostly applies to files vendored from the [Docker SDK for Python](https://github.com/docker/docker-py/).
+
+All files have a machine readable `SPDX-License-Identifier:` comment denoting its respective license(s) or an equivalent entry in an accompanying `.license` file. Only changelog fragments (which will not be part of a release) are covered by a blanket statement in `.reuse/dep5`. This conforms to the [REUSE specification](https://reuse.software/spec/).
diff --git a/ansible_collections/community/docker/changelogs/changelog.yaml b/ansible_collections/community/docker/changelogs/changelog.yaml
new file mode 100644
index 00000000..c57bfef4
--- /dev/null
+++ b/ansible_collections/community/docker/changelogs/changelog.yaml
@@ -0,0 +1,1141 @@
+ancestor: null
+releases:
+ 0.1.0:
+ changes:
+ bugfixes:
+ - docker_login - fix internal config file storage to handle credentials for
+ more than one registry (https://github.com/ansible-collections/community.general/issues/1117).
+ minor_changes:
+ - docker_container - now supports the ``device_requests`` option, which allows
+ to request additional resources such as GPUs (https://github.com/ansible/ansible/issues/65748,
+ https://github.com/ansible-collections/community.general/pull/1119).
+ release_summary: 'The ``community.docker`` continues the work on the Ansible
+ docker modules and plugins from their state in ``community.general`` 1.2.0.
+ The changes listed here are thus relative to the modules and plugins ``community.general.docker*``.
+
+
+ All deprecation removals planned for ``community.general`` 2.0.0 have been
+ applied. All deprecation removals scheduled for ``community.general`` 3.0.0
+ have been re-scheduled for ``community.docker`` 2.0.0.
+
+ '
+ removed_features:
+ - docker_container - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_container - the default of ``networks_cli_compatible`` changed to ``true``
+ (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_container - the unused option ``trust_image_content`` has been removed
+ (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_image - ``state=build`` has been removed. Use ``present`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_image - the ``container_limits``, ``dockerfile``, ``http_timeout``,
+ ``nocache``, ``rm``, ``path``, ``buildargs``, ``pull`` have been removed.
+ Use the corresponding suboptions of ``build`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_image - the ``force`` option has been removed. Use the more specific
+ ``force_*`` options instead (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_image - the ``source`` option is now mandatory (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_image - the ``use_tls`` option has been removed. Use ``tls`` and ``validate_certs``
+ instead (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_image - the default of the ``build.pull`` option changed to ``false``
+ (https://github.com/ansible-collections/community.docker/pull/1).
+    - docker_image_facts - this alias is no longer available, use ``docker_image_info``
+ instead (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_network - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_network - the ``ipam_options`` option has been removed. Use ``ipam_config``
+ instead (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_service - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_swarm - ``state=inspect`` has been removed. Use ``docker_swarm_info``
+ instead (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_swarm_service - the ``constraints`` option has been removed. Use ``placement.constraints``
+ instead (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_swarm_service - the ``limit_cpu`` and ``limit_memory`` options has
+ been removed. Use the corresponding suboptions in ``limits`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_swarm_service - the ``log_driver`` and ``log_driver_options`` options
+ has been removed. Use the corresponding suboptions in ``logging`` instead
+ (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_swarm_service - the ``reserve_cpu`` and ``reserve_memory`` options
+ has been removed. Use the corresponding suboptions in ``reservations`` instead
+ (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_swarm_service - the ``restart_policy``, ``restart_policy_attempts``,
+ ``restart_policy_delay`` and ``restart_policy_window`` options has been removed.
+ Use the corresponding suboptions in ``restart_config`` instead (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_swarm_service - the ``update_delay``, ``update_parallelism``, ``update_failure_action``,
+ ``update_monitor``, ``update_max_failure_ratio`` and ``update_order`` options
+ has been removed. Use the corresponding suboptions in ``update_config`` instead
+ (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_volume - no longer returns ``ansible_facts`` (https://github.com/ansible-collections/community.docker/pull/1).
+ - docker_volume - the ``force`` option has been removed. Use ``recreate`` instead
+ (https://github.com/ansible-collections/community.docker/pull/1).
+ fragments:
+ - 0.1.0.yml
+ - c.g-1118-docker_login-config-store.yml
+ - c.g-1119-docker_container-device-reqests.yml
+ - c.g-2.0.0-deprecations.yml
+ release_date: '2020-10-30'
+ 1.0.0:
+ changes:
+ minor_changes:
+ - Add collection-side support of the ``docker`` action group / module defaults
+ group (https://github.com/ansible-collections/community.docker/pull/17).
+ - docker_image - return docker build output (https://github.com/ansible-collections/community.general/pull/805).
+ - docker_secret - add a warning when the secret does not have an ``ansible_key``
+ label but the ``force`` parameter is not set (https://github.com/ansible-collections/community.docker/issues/30,
+ https://github.com/ansible-collections/community.docker/pull/31).
+ release_summary: 'This is the first production (non-prerelease) release of ``community.docker``.
+
+ '
+ fragments:
+ - 1.0.0.yml
+ - 17-action-group.yml
+ - 31-docker-secret.yml
+ - community.general-805-docker_image-build-output.yml
+ release_date: '2020-11-17'
+ 1.0.1:
+ changes:
+ bugfixes:
+ - docker_container - the validation for ``capabilities`` in ``device_requests``
+ was incorrect (https://github.com/ansible-collections/community.docker/issues/42,
+ https://github.com/ansible-collections/community.docker/pull/43).
+ release_summary: Maintenance release with a bugfix for ``docker_container``.
+ fragments:
+ - 1.0.1.yml
+ - 43-docker_container-device_requests.yml
+ release_date: '2020-12-11'
+ 1.1.0:
+ changes:
+ bugfixes:
+ - docker_image - if ``push=true`` is used with ``repository``, and the image
+ does not need to be tagged, still push. This can happen if ``repository``
+ and ``name`` are equal (https://github.com/ansible-collections/community.docker/issues/52,
+ https://github.com/ansible-collections/community.docker/pull/53).
+ - docker_image - report error when loading a broken archive that contains no
+ image (https://github.com/ansible-collections/community.docker/issues/46,
+ https://github.com/ansible-collections/community.docker/pull/55).
+ - docker_image - report error when the loaded archive does not contain the specified
+ image (https://github.com/ansible-collections/community.docker/issues/41,
+ https://github.com/ansible-collections/community.docker/pull/55).
+ deprecated_features:
+ - docker_container - currently ``published_ports`` can contain port mappings
+ next to the special value ``all``, in which case the port mappings are ignored.
+ This behavior is deprecated for community.docker 2.0.0, at which point it
+ will either be forbidden, or this behavior will be properly implemented similar
+ to how the Docker CLI tool handles this (https://github.com/ansible-collections/community.docker/issues/8,
+ https://github.com/ansible-collections/community.docker/pull/60).
+ minor_changes:
+ - docker_container - support specifying ``cgroup_parent`` (https://github.com/ansible-collections/community.docker/issues/6,
+ https://github.com/ansible-collections/community.docker/pull/59).
+ - docker_container - when a container is started with ``detached=false``, ``status``
+ is now also returned when it is 0 (https://github.com/ansible-collections/community.docker/issues/26,
+ https://github.com/ansible-collections/community.docker/pull/58).
+ - docker_image - support ``platform`` when building images (https://github.com/ansible-collections/community.docker/issues/22,
+ https://github.com/ansible-collections/community.docker/pull/54).
+ release_summary: Feature release with three new plugins and modules.
+ fragments:
+ - 1.1.0.yml
+ - 53-docker_image-tag-push.yml
+ - 54-docker_image-build-platform.yml
+ - 55-docker_image-loading.yml
+ - 58-docker_container-non-detached-status.yml
+ - 59-docker_container-cgroup-parent.yml
+ - 60-docker_container-publish-all.yml
+ modules:
+ - description: Return facts about whether the module runs in a Docker container
+ name: current_container_facts
+ namespace: ''
+ plugins:
+ connection:
+ - description: Run tasks in docker containers
+ name: docker_api
+ namespace: null
+ inventory:
+ - description: Ansible dynamic inventory plugin for Docker containers.
+ name: docker_containers
+ namespace: null
+ release_date: '2021-01-03'
+ 1.10.0:
+ changes:
+ minor_changes:
+ - Add the modules docker_container_exec, docker_image_load and docker_plugin
+ to the ``docker`` module defaults group (https://github.com/ansible-collections/community.docker/pull/209).
+ - docker_config - add option ``data_src`` to read configuration data from target
+ (https://github.com/ansible-collections/community.docker/issues/64, https://github.com/ansible-collections/community.docker/pull/203).
+ - docker_secret - add option ``data_src`` to read secret data from target (https://github.com/ansible-collections/community.docker/issues/64,
+ https://github.com/ansible-collections/community.docker/pull/203).
+ release_summary: Regular feature and bugfix release.
+ fragments:
+ - 1.10.0.yml
+ - 203-docker_secret-config-data_src.yml
+ - 209-action-group.yml
+ release_date: '2021-10-05'
+ 1.2.0:
+ changes:
+ bugfixes:
+ - docker_container - allow IPv6 zones (RFC 4007) in bind IPs (https://github.com/ansible-collections/community.docker/pull/66).
+ - docker_image - fix crash on loading images with versions of Docker SDK for
+ Python before 2.5.0 (https://github.com/ansible-collections/community.docker/issues/72,
+ https://github.com/ansible-collections/community.docker/pull/73).
+ minor_changes:
+ - docker_container - added ``default_host_ip`` option which allows to explicitly
+ set the default IP string for published ports without explicitly specified
+ IPs. When using IPv6 binds with Docker 20.10.2 or newer, this needs to be
+ set to an empty string (``""``) (https://github.com/ansible-collections/community.docker/issues/70,
+ https://github.com/ansible-collections/community.docker/pull/71).
+ release_summary: Feature release with one new feature and two bugfixes.
+ fragments:
+ - 1.2.0.yml
+ - 66-ipv6-zones.yml
+ - 71-docker_container-default_host_ip.yml
+ - 73-docker_image-fix-old-docker-py-version.yml
+ release_date: '2021-01-25'
+ 1.2.1:
+ changes:
+ bugfixes:
+ - docker connection plugin - fix Docker version parsing, as some docker versions
+ have a leading ``v`` in the output of the command ``docker version --format
+ "{{.Server.Version}}"`` (https://github.com/ansible-collections/community.docker/pull/76).
+ release_summary: Bugfix release.
+ fragments:
+ - 1.2.1.yml
+ - 76-leading-v-support-in-docker-version.yml
+ release_date: '2021-01-28'
+ 1.2.2:
+ changes:
+ release_summary: Security bugfix release to address CVE-2021-20191.
+ security_fixes:
+ - docker_swarm - enabled ``no_log`` for the option ``signing_ca_key`` to prevent
+ accidental disclosure (CVE-2021-20191, https://github.com/ansible-collections/community.docker/pull/80).
+ fragments:
+ - 1.2.2.yml
+ - CVE-2021-20191_no_log.yml
+ release_date: '2021-02-05'
+ 1.3.0:
+ changes:
+ bugfixes:
+ - docker_container - fix healthcheck disabling idempotency issue with strict
+ comparison (https://github.com/ansible-collections/community.docker/issues/85).
+ - docker_image - prevent module failure when removing image that is removed
+ between inspection and removal (https://github.com/ansible-collections/community.docker/pull/87).
+    - docker_image - prevent module failure when removing non-existent image by
+ ID (https://github.com/ansible-collections/community.docker/pull/87).
+ - docker_image_info - prevent module failure when image vanishes between listing
+ and inspection (https://github.com/ansible-collections/community.docker/pull/87).
+    - docker_image_info - prevent module failure when querying non-existent image
+ by ID (https://github.com/ansible-collections/community.docker/pull/87).
+ minor_changes:
+ - docker_container - add ``storage_opts`` option to specify storage options
+ (https://github.com/ansible-collections/community.docker/issues/91, https://github.com/ansible-collections/community.docker/pull/93).
+ - docker_image - allows to specify platform to pull for ``source=pull`` with
+ new option ``pull_platform`` (https://github.com/ansible-collections/community.docker/issues/79,
+ https://github.com/ansible-collections/community.docker/pull/89).
+ - docker_image - properly support image IDs (hashes) for loading and tagging
+ images (https://github.com/ansible-collections/community.docker/issues/86,
+ https://github.com/ansible-collections/community.docker/pull/87).
+ - docker_swarm_service - adding support for maximum number of tasks per node
+ (``replicas_max_per_node``) when running swarm service in replicated mode.
+ Introduced in API 1.40 (https://github.com/ansible-collections/community.docker/issues/7,
+ https://github.com/ansible-collections/community.docker/pull/92).
+ release_summary: Regular feature and bugfix release.
+ fragments:
+ - 1.3.0.yml
+ - 87-docker_image-load-image-ids.yml
+ - 88-docker_container-healthcheck.yml
+ - 89-docker_image-pull-platform.yml
+ - 92-replicas-max-per-node.yml
+ - 93-docker_container-storage_opts.yml
+ modules:
+ - description: Load docker image(s) from archives
+ name: docker_image_load
+ namespace: ''
+ - description: Manage Docker plugins
+ name: docker_plugin
+ namespace: ''
+ release_date: '2021-03-08'
+ 1.4.0:
+ changes:
+ breaking_changes:
+ - docker_swarm - if ``join_token`` is specified, a returned join token with
+ the same value will be replaced by ``VALUE_SPECIFIED_IN_NO_LOG_PARAMETER``.
+ Make sure that you do not blindly use the join tokens from the return value
+ of this module when the module is invoked with ``join_token`` specified! This
+ breaking change appears in a minor release since it is necessary to fix a
+ security issue (https://github.com/ansible-collections/community.docker/pull/103).
+ bugfixes:
+    - '``docker_swarm_service`` - fix KeyError caused by reference to deprecated
+ option ``update_failure_action`` (https://github.com/ansible-collections/community.docker/pull/100).'
+ - docker_swarm_service - mark ``secrets`` module option with ``no_log=False``
+ since it does not leak secrets (https://github.com/ansible-collections/community.general/pull/2001).
+ minor_changes:
+ - docker_swarm_service - change ``publish.published_port`` option from mandatory
+ to optional. Docker will assign random high port if not specified (https://github.com/ansible-collections/community.docker/issues/99).
+ release_summary: Security release to address another potential secret leak.
+ Also includes regular bugfixes and features.
+ security_fixes:
+ - docker_swarm - the ``join_token`` option is now marked as ``no_log`` so it
+ is no longer written into logs (https://github.com/ansible-collections/community.docker/pull/103).
+ fragments:
+ - 1.4.0.yml
+ - 100-fix-update_failture_action-keyerror-in-docker_swarm_service.yaml
+ - 101-make-service-published-port-optional.yaml
+ - 102-no_log-false.yml
+ - 103-docker_swarm-join_token.yml
+ release_date: '2021-03-14'
+ 1.5.0:
+ changes:
+ bugfixes:
+ - all modules - use ``to_native`` to convert exceptions to strings (https://github.com/ansible-collections/community.docker/pull/121).
+ minor_changes:
+ - Add the ``use_ssh_client`` option to most docker modules and plugins (https://github.com/ansible-collections/community.docker/issues/108,
+ https://github.com/ansible-collections/community.docker/pull/114).
+ release_summary: Regular feature release.
+ fragments:
+ - 1.5.0.yml
+ - 114-use_ssh_client.yml
+ - 121-exception-handling.yml
+ modules:
+ - description: Execute command in a docker container
+ name: docker_container_exec
+ namespace: ''
+ release_date: '2021-04-11'
+ 1.6.0:
+ changes:
+ bugfixes:
+ - 'docker-compose - fix not pulling when ``state: present`` and ``stopped: true``
+ (https://github.com/ansible-collections/community.docker/issues/12, https://github.com/ansible-collections/community.docker/pull/119).'
+ - docker_plugin - also configure plugin after installing (https://github.com/ansible-collections/community.docker/issues/118,
+ https://github.com/ansible-collections/community.docker/pull/135).
+ - docker_swarm_services - avoid crash during idempotence check if ``published_port``
+ is not specified (https://github.com/ansible-collections/community.docker/issues/107,
+ https://github.com/ansible-collections/community.docker/pull/136).
+ deprecated_features:
+ - docker_* modules and plugins, except ``docker_swarm`` connection plugin and
+      ``docker_compose`` and ``docker_stack*`` modules - the current default ``localhost``
+ for ``tls_hostname`` is deprecated. In community.docker 2.0.0 it will be computed
+ from ``docker_host`` instead (https://github.com/ansible-collections/community.docker/pull/134).
+ minor_changes:
+ - common module utils - correct error messages for guiding to install proper
+ Docker SDK for Python module (https://github.com/ansible-collections/community.docker/pull/125).
+ - 'docker_container - allow ``memory_swap: -1`` to set memory swap limit to
+ unlimited. This is useful when the user cannot set memory swap limits due
+ to cgroup limitations or other reasons, as by default Docker will try to set
+ swap usage to two times the value of ``memory`` (https://github.com/ansible-collections/community.docker/pull/138).'
+ release_summary: Regular bugfix and feature release.
+ fragments:
+ - 1.6.0.yml
+ - 12-correct_pull_wo_starting.yaml
+ - 125-correct-error-message-for-docker-sdk-version.yaml
+ - 134-tls_hostname.yml
+ - 135-docker_plugin-config.yml
+ - 136-docker_swarm_service-fix-idempotence-bug.yml
+ - 138-docker_container-allow-memory-swap-unlimited.yml
+ release_date: '2021-05-11'
+ 1.6.1:
+ changes:
+ bugfixes:
+ - docker_* modules and plugins, except ``docker_swarm`` connection plugin and
+      ``docker_compose`` and ``docker_stack*`` modules - only emit ``tls_hostname``
+ deprecation message if TLS is actually used (https://github.com/ansible-collections/community.docker/pull/143).
+ release_summary: Bugfix release to reduce deprecation warning spam.
+ fragments:
+ - 1.6.1.yml
+ - 143-tls_hostname-deprecation.yml
+ release_date: '2021-05-17'
+ 1.7.0:
+ changes:
+ minor_changes:
+ - docker_image - allow to tag images by ID (https://github.com/ansible-collections/community.docker/pull/149).
+ release_summary: Small feature and bugfix release.
+ fragments:
+ - 1.7.0.yml
+ - 149-docker_image-tagging.yml
+ release_date: '2021-06-08'
+ 1.8.0:
+ changes:
+ bugfixes:
+ - 'docker_compose - fix idempotence bug when using ``stopped: true`` (https://github.com/ansible-collections/community.docker/issues/142,
+ https://github.com/ansible-collections/community.docker/pull/159).'
+ minor_changes:
+ - Avoid internal ansible-core module_utils in favor of equivalent public API
+ available since at least Ansible 2.9 (https://github.com/ansible-collections/community.docker/pull/164).
+ - docker_compose - added ``profiles`` option to specify service profiles when
+ starting services (https://github.com/ansible-collections/community.docker/pull/167).
+ - docker_containers inventory plugin - when ``connection_type=docker-api``,
+ now pass Docker daemon connection options from inventory plugin to connection
+ plugin. This can be disabled by setting ``configure_docker_daemon=false``
+ (https://github.com/ansible-collections/community.docker/pull/157).
+ - docker_host_info - allow values for keys in ``containers_filters``, ``images_filters``,
+ ``networks_filters``, and ``volumes_filters`` to be passed as YAML lists (https://github.com/ansible-collections/community.docker/pull/160).
+ - docker_plugin - added ``alias`` option to specify local names for docker plugins
+ (https://github.com/ansible-collections/community.docker/pull/161).
+ release_summary: Regular bugfix and feature release.
+ fragments:
+ - 1.8.0.yml
+ - 157-inventory-connection-options.yml
+ - 159-docker_compose-idempotence-fix.yml
+ - 160-docker_host_info-label-fitler-lists.yml
+ - 161-docker_plugin-alias-option.yml
+ - 167-docker_compose-profiles-option.yml
+ - ansible-core-_text.yml
+ release_date: '2021-06-28'
+ 1.9.0:
+ changes:
+ bugfixes:
+ - docker_compose - fixes task failures when bringing up services while using
+ ``docker-compose <1.17.0`` (https://github.com/ansible-collections/community.docker/issues/180).
+ - docker_container - make sure to also return ``container`` on ``detached=false``
+ when status code is non-zero (https://github.com/ansible-collections/community.docker/pull/178).
+ - docker_stack_info - make sure that module isn't skipped in check mode (https://github.com/ansible-collections/community.docker/pull/183).
+ - docker_stack_task_info - make sure that module isn't skipped in check mode
+ (https://github.com/ansible-collections/community.docker/pull/183).
+ deprecated_features:
+ - docker_container - the new ``command_handling``'s default value, ``compatibility``,
+ is deprecated and will change to ``correct`` in community.docker 3.0.0. A
+ deprecation warning is emitted by the module in cases where the behavior will
+ change. Please note that ansible-core will output a deprecation warning only
+ once, so if it is shown for an earlier task, there could be more tasks with
+ this warning where it is not shown (https://github.com/ansible-collections/community.docker/pull/186).
+ minor_changes:
+ - docker_* modules - include ``ImportError`` traceback when reporting that Docker
+ SDK for Python could not be found (https://github.com/ansible-collections/community.docker/pull/188).
+ - docker_compose - added ``env_file`` option for specifying custom environment
+ files (https://github.com/ansible-collections/community.docker/pull/174).
+ - docker_container - added ``publish_all_ports`` option to publish all exposed
+ ports to random ports except those explicitly bound with ``published_ports``
+ (this was already added in community.docker 1.8.0) (https://github.com/ansible-collections/community.docker/pull/162).
+ - docker_container - added new ``command_handling`` option with current deprecated
+ default value ``compatibility`` which allows to control how the module handles
+ shell quoting when interpreting lists, and how the module handles empty lists/strings.
+ The default will switch to ``correct`` in community.docker 3.0.0 (https://github.com/ansible-collections/community.docker/pull/186).
+ - docker_container - lifted restriction preventing the creation of anonymous
+ volumes with the ``mounts`` option (https://github.com/ansible-collections/community.docker/pull/181).
+ release_summary: New bugfixes and features release.
+ fragments:
+ - 1.9.0.yml
+ - 162-docker_container_publish_all_option.yml
+ - 174-docker_compose-env_file.yml
+ - 178-docker_container-container.yml
+ - 181-docker_container-allow-anonymous-volume-mounts.yml
+ - 182-docker_compose-fix-start-keyword-failures.yml
+ - 183-info-check_mode.yml
+ - 186-docker_container-command-entrypoint.yml
+ - 188-improve-import-errors.yml
+ plugins:
+ connection:
+ - description: execute on host running controller container
+ name: nsenter
+ namespace: null
+ release_date: '2021-08-03'
+ 1.9.1:
+ changes:
+ bugfixes:
+ - docker_compose - fixed incorrect ``changed`` status for services with ``profiles``
+ defined, but none enabled (https://github.com/ansible-collections/community.docker/pull/192).
+ release_summary: Regular bugfix release.
+ fragments:
+ - 1.9.1.yml
+ - 192-docker_compose-profiles-idempotency-fix.yml
+ release_date: '2021-08-29'
+ 2.0.0:
+ changes:
+ breaking_changes:
+ - docker_compose - fixed ``timeout`` defaulting behavior so that ``stop_grace_period``,
+ if defined in the compose file, will be used if `timeout`` is not specified
+ (https://github.com/ansible-collections/community.docker/pull/163).
+ deprecated_features:
+ - docker_container - using the special value ``all`` in ``published_ports``
+ has been deprecated. Use ``publish_all_ports=true`` instead (https://github.com/ansible-collections/community.docker/pull/210).
+ release_summary: New major release with some deprecations removed and a breaking
+ change in the ``docker_compose`` module regarding the ``timeout`` parameter.
+ removed_features:
+ - docker_container - the default value of ``container_default_behavior`` changed
+ to ``no_defaults`` (https://github.com/ansible-collections/community.docker/pull/210).
+ - docker_container - the default value of ``network_mode`` is now the name of
+ the first network specified in ``networks`` if such are specified and ``networks_cli_compatible=true``
+ (https://github.com/ansible-collections/community.docker/pull/210).
+ - docker_container - the special value ``all`` can no longer be used in ``published_ports``
+ next to other values. Please use ``publish_all_ports=true`` instead (https://github.com/ansible-collections/community.docker/pull/210).
+ - docker_login - removed the ``email`` option (https://github.com/ansible-collections/community.docker/pull/210).
+ fragments:
+ - 163-docker_compose-timeout-fix.yml
+ - 2.0.0.yml
+ - 210-deprecations.yml
+ release_date: '2021-10-21'
+ 2.0.1:
+ changes:
+ release_summary: Maintenance release with some documentation fixes.
+ fragments:
+ - 2.0.1.yml
+ release_date: '2021-11-13'
+ 2.0.2:
+ changes:
+ bugfixes:
+ - docker_api connection plugin - avoid passing an unnecessary argument to a
+ Docker SDK for Python call that is only supported by version 3.0.0 or later
+ (https://github.com/ansible-collections/community.docker/pull/243).
+ - docker_container_exec - ``chdir`` is only supported since Docker SDK for Python
+ 3.0.0. Make sure that this option can only be used when 3.0.0 or later is installed,
+ and prevent passing this parameter on when ``chdir`` is not provided to this
+ module (https://github.com/ansible-collections/community.docker/pull/243,
+ https://github.com/ansible-collections/community.docker/issues/242).
+ - nsenter connection plugin - ensure the ``nsenter_pid`` option is retrieved
+ in ``_connect`` instead of ``__init__`` to prevent a crasher due to bad initialization
+ order (https://github.com/ansible-collections/community.docker/pull/249).
+ - nsenter connection plugin - replace the use of ``--all-namespaces`` with specific
+ namespaces to support compatibility with Busybox nsenter (used on, for example,
+ Alpine containers) (https://github.com/ansible-collections/community.docker/pull/249).
+ release_summary: Bugfix release.
+ fragments:
+ - 2.0.2.yml
+ - 243-docker_container_exec-chdir.yml
+ - 249-nsenter-fixes.yml
+ release_date: '2021-12-09'
+ 2.1.0:
+ changes:
+ bugfixes:
+ - Various modules and plugins - use vendored version of ``distutils.version``
+ included in ansible-core 2.12 if available. This avoids breakage when ``distutils``
+ is removed from the standard library of Python 3.12. Note that ansible-core
+ 2.11, ansible-base 2.10 and Ansible 2.9 are right now not compatible with
+ Python 3.12, hence this fix does not target these ansible-core/-base/2.9 versions
+ (https://github.com/ansible-collections/community.docker/pull/258).
+ - docker connection plugin - replace deprecated ``distutils.spawn.find_executable``
+ with Ansible's ``get_bin_path`` to find the ``docker`` executable (https://github.com/ansible-collections/community.docker/pull/257).
+ - docker_container_exec - disallow using the ``chdir`` option for Docker API
+ before 1.35 (https://github.com/ansible-collections/community.docker/pull/253).
+ minor_changes:
+ - docker_container_exec - add ``detach`` parameter (https://github.com/ansible-collections/community.docker/issues/250,
+ https://github.com/ansible-collections/community.docker/pull/255).
+ - docker_container_exec - add ``env`` option (https://github.com/ansible-collections/community.docker/issues/248,
+ https://github.com/ansible-collections/community.docker/pull/254).
+ release_summary: Feature and bugfix release.
+ fragments:
+ - 2.1.0.yml
+ - 253-chdir-min-version.yml
+ - 254-docker_container_exec-env.yml
+ - 255-docker_container_exec-detach.yml
+ - 257-remove-distutils-spawn.yml
+ - 258-distutils.version.yml
+ release_date: '2022-01-04'
+ 2.1.1:
+ changes:
+ bugfixes:
+ - Fix unintended breaking change caused by `an earlier fix <https://github.com/ansible-collections/community.docker/pull/258>`_
+ by vendoring the deprecated Python standard library ``distutils.version``
+ until this collection stops supporting Ansible 2.9 and ansible-base 2.10 (https://github.com/ansible-collections/community.docker/issues/267,
+ https://github.com/ansible-collections/community.docker/pull/269).
+ release_summary: Emergency release to amend breaking change in previous release.
+ fragments:
+ - 2.1.1.yml
+ - 269-distutils-version-fix.yml
+ release_date: '2022-01-05'
+ 2.2.0:
+ changes:
+ bugfixes:
+ - docker_container, docker_image - adjust image finding code to peculiarities
+ of ``podman-docker``'s API emulation when Docker short names like ``redis``
+ are used (https://github.com/ansible-collections/community.docker/issues/292).
+ minor_changes:
+ - docker_config - add support for rolling update, set ``rolling_versions`` to
+ ``true`` to enable (https://github.com/ansible-collections/community.docker/pull/295,
+ https://github.com/ansible-collections/community.docker/issues/109).
+ - docker_secret - add support for rolling update, set ``rolling_versions`` to
+ ``true`` to enable (https://github.com/ansible-collections/community.docker/pull/293,
+ https://github.com/ansible-collections/community.docker/issues/21).
+ - docker_swarm_service - add support for setting capabilities with the ``cap_add``
+ and ``cap_drop`` parameters. Usage is the same as with the ``capabilities``
+ and ``cap_drop`` parameters for ``docker_container`` (https://github.com/ansible-collections/community.docker/pull/294).
+ release_summary: Regular feature and bugfix release.
+ fragments:
+ - 2.2.0.yml
+ - 270-rolling-secrets.yml
+ - 271-swarm-service-capabilities.yml
+ - 272-rolling-configs.yml
+ - 292-docker-podman-compatibility.yml
+ release_date: '2022-02-21'
+ 2.2.1:
+ changes:
+ bugfixes:
+ - docker_compose - fix Python 3 type error when extracting warnings or errors
+ from docker-compose's output (https://github.com/ansible-collections/community.docker/pull/305).
+ release_summary: Regular bugfix release.
+ fragments:
+ - 2.2.1.yml
+ - 305-docker_compose-errors-warnings.yml
+ release_date: '2022-03-14'
+ 2.3.0:
+ changes:
+ bugfixes:
+ - docker connection plugin - fix option handling to be compatible with ansible-core
+ 2.13 (https://github.com/ansible-collections/community.docker/pull/297, https://github.com/ansible-collections/community.docker/issues/307).
+ - docker_api connection plugin - fix option handling to be compatible with ansible-core
+ 2.13 (https://github.com/ansible-collections/community.docker/pull/308).
+ minor_changes:
+ - docker connection plugin - implement connection reset by clearing internal
+ container user cache (https://github.com/ansible-collections/community.docker/pull/312).
+ - docker connection plugin - simplify ``actual_user`` handling code (https://github.com/ansible-collections/community.docker/pull/311).
+ - docker connection plugin - the plugin supports new ways to define the timeout.
+ These are the ``ANSIBLE_DOCKER_TIMEOUT`` environment variable, the ``timeout``
+ setting in the ``docker_connection`` section of ``ansible.cfg``, and the ``ansible_docker_timeout``
+ variable (https://github.com/ansible-collections/community.docker/pull/297).
+ - docker_api connection plugin - implement connection reset by clearing internal
+ container user/group ID cache (https://github.com/ansible-collections/community.docker/pull/312).
+ - docker_api connection plugin - the plugin supports new ways to define the
+ timeout. These are the ``ANSIBLE_DOCKER_TIMEOUT`` environment variable, the
+ ``timeout`` setting in the ``docker_connection`` section of ``ansible.cfg``,
+ and the ``ansible_docker_timeout`` variable (https://github.com/ansible-collections/community.docker/pull/308).
+ release_summary: Regular feature and bugfix release.
+ fragments:
+ - 2.3.0.yml
+ - 297-docker-connection-config.yml
+ - 308-docker_api-connection-config.yml
+ - 311-docker-actual_user.yml
+ - 312-docker-connection-reset.yml
+ release_date: '2022-03-28'
+ 2.4.0:
+ changes:
+ bugfixes:
+ - docker connection plugin - make sure that ``docker_extra_args`` is used for
+ querying the Docker version. Also ensures that the Docker version is only
+ queried when needed. This is currently the case if a remote user is specified
+ (https://github.com/ansible-collections/community.docker/issues/325, https://github.com/ansible-collections/community.docker/pull/327).
+ minor_changes:
+ - Prepare collection for inclusion in an Execution Environment by declaring
+ its dependencies. The ``docker_stack*`` modules are not supported (https://github.com/ansible-collections/community.docker/pull/336).
+ - current_container_facts - add detection for GitHub Actions (https://github.com/ansible-collections/community.docker/pull/336).
+ - docker_container - support returning Docker container log output when using
+ Docker's ``local`` logging driver, an optimized local logging driver introduced
+ in Docker 18.09 (https://github.com/ansible-collections/community.docker/pull/337).
+ release_summary: Regular feature and bugfix release.
+ fragments:
+ - 2.4.0.yml
+ - 327-connection-fix.yml
+ - 336-ee.yml
+ - 337-container-output-from-local-logging-driver.yml
+ release_date: '2022-04-25'
+ 2.5.0:
+ changes:
+ minor_changes:
+ - docker_config - add support for ``template_driver`` with one option ``golang``
+ (https://github.com/ansible-collections/community.docker/issues/332, https://github.com/ansible-collections/community.docker/pull/345).
+ - docker_swarm - adds ``data_path_addr`` parameter during swarm initialization
+ or when joining (https://github.com/ansible-collections/community.docker/issues/339).
+ release_summary: Regular feature release.
+ fragments:
+ - 2.5.0.yml
+ - 344-adds-data-path-addr.yml
+ - 345-docker_config-template-driver.yml
+ release_date: '2022-05-14'
+ 2.5.1:
+ changes:
+ bugfixes:
+ - Include ``PSF-license.txt`` file for ``plugins/module_utils/_version.py``.
+ release_summary: Maintenance release.
+ fragments:
+ - 2.5.1.yml
+ - psf-license.yml
+ release_date: '2022-05-16'
+ 2.6.0:
+ changes:
+ bugfixes:
+ - docker_container - fail with a meaningful message instead of crashing if a
+ port is specified with more than three colon-separated parts (https://github.com/ansible-collections/community.docker/pull/367,
+ https://github.com/ansible-collections/community.docker/issues/365).
+ - docker_container - remove unused code that will cause problems with Python
+ 3.13 (https://github.com/ansible-collections/community.docker/pull/354).
+ deprecated_features:
+ - Support for Ansible 2.9 and ansible-base 2.10 is deprecated, and will be removed
+ in the next major release (community.docker 3.0.0). Some modules might still
+ work with these versions afterwards, but we will no longer keep compatibility
+ code that was needed to support them (https://github.com/ansible-collections/community.docker/pull/361).
+ - The dependency on docker-compose for Execution Environments is deprecated
+ and will be removed in community.docker 3.0.0. The `Python docker-compose
+ library <https://pypi.org/project/docker-compose/>`__ is unmaintained and
+ can cause dependency issues. You can manually still install it in an Execution
+ Environment when needed (https://github.com/ansible-collections/community.docker/pull/373).
+ - Various modules - the default of ``tls_hostname`` that was supposed to be
+ removed in community.docker 2.0.0 will now be removed in version 3.0.0 (https://github.com/ansible-collections/community.docker/pull/362).
+ - docker_stack - the return values ``out`` and ``err`` that were supposed to
+ be removed in community.docker 2.0.0 will now be removed in version 3.0.0
+ (https://github.com/ansible-collections/community.docker/pull/362).
+ minor_changes:
+ - docker_container - added ``image_label_mismatch`` parameter (https://github.com/ansible-collections/community.docker/issues/314,
+ https://github.com/ansible-collections/community.docker/pull/370).
+ release_summary: Bugfix and feature release.
+ fragments:
+ - 2.6.0.yml
+ - 354-remove-dead-code.yml
+ - 362-deprecations.yml
+ - 367-docker_container-ports-validation.yml
+ - 370-add-image-label-mismatch.yml
+ - 373-deprecate-docker-compose-dependency.yml
+ - deprecate-ansible-2.9-2.10.yml
+ release_date: '2022-05-24'
+ 2.7.0:
+ changes:
+ bugfixes:
+ - Docker SDK for Python based modules and plugins - if the API version is specified
+ as an option, use that one to validate API version requirements of module/plugin
+ options instead of the latest API version supported by the Docker daemon.
+ This also avoids one unnecessary API call per module/plugin (https://github.com/ansible-collections/community.docker/pull/389).
+ deprecated_features:
+ - Support for Docker API version 1.20 to 1.24 has been deprecated and will be
+ removed in community.docker 3.0.0. The first Docker version supporting API
+ version 1.25 was Docker 1.13, released in January 2017. This affects the modules
+ ``docker_container``, ``docker_container_exec``, ``docker_container_info``,
+ ``docker_compose``, ``docker_login``, ``docker_image``, ``docker_image_info``,
+ ``docker_image_load``, ``docker_host_info``, ``docker_network``, ``docker_network_info``,
+ ``docker_node_info``, ``docker_swarm_info``, ``docker_swarm_service``, ``docker_swarm_service_info``,
+ ``docker_volume_info``, and ``docker_volume``, whose minimally supported API
+ version is between 1.20 and 1.24 (https://github.com/ansible-collections/community.docker/pull/396).
+ - Support for Python 2.6 is deprecated and will be removed in the next major
+ release (community.docker 3.0.0). Some modules might still work with Python
+ 2.6, but we will no longer try to ensure compatibility (https://github.com/ansible-collections/community.docker/pull/388).
+ minor_changes:
+ - Move common utility functions from the ``common`` module_util to a new module_util
+ called ``util``. This should not have any user-visible effect (https://github.com/ansible-collections/community.docker/pull/390).
+ release_summary: Bugfix and deprecation release. The next 2.x.y releases will
+ only be bugfix releases, the next expected minor/major release will be 3.0.0
+ with some major changes.
+ fragments:
+ - 2.7.0.yml
+ - 389-api-version.yml
+ - 390-util.yml
+ - 397-deprecate-docker-api-1.24.yml
+ - python-2.6.yml
+ release_date: '2022-07-02'
+ 3.0.0:
+ changes:
+ bugfixes:
+ - docker_plugin - fix crash when handling plugin options (https://github.com/ansible-collections/community.docker/issues/446,
+ https://github.com/ansible-collections/community.docker/pull/447).
+ - docker_stack - fix broken string formatting when reporting error in case ``compose``
+ was containing invalid values (https://github.com/ansible-collections/community.docker/pull/448).
+ minor_changes:
+ - modules and plugins communicating directly with the Docker daemon - simplify
+ use of helper function that was removed in Docker SDK for Python to find executables
+ (https://github.com/ansible-collections/community.docker/pull/438).
+ release_summary: The 3.0.0 release features a rewrite of the ``docker_container``
+ module, and many modules and plugins no longer depend on the Docker SDK for
+ Python.
+ fragments:
+ - 3.0.0.yml
+ - 438-docker-py.yml
+ - 447-docker_plugin-bug.yml
+ - 448-docker_stack-error.yml
+ release_date: '2022-08-12'
+ 3.0.0-a1:
+ changes:
+ breaking_changes:
+ - This collection does not work with ansible-core 2.11 on Python 3.12+. Please
+ either upgrade to ansible-core 2.12+, or use Python 3.11 or earlier (https://github.com/ansible-collections/community.docker/pull/271).
+ major_changes:
+ - The collection now contains vendored code from the Docker SDK for Python to
+ talk to the Docker daemon. Modules and plugins using this code no longer need
+ the Docker SDK for Python installed on the machine the module or plugin is
+ running on (https://github.com/ansible-collections/community.docker/pull/398).
+ - docker_api connection plugin - no longer uses the Docker SDK for Python. It
+ requires ``requests`` to be installed, and depending on the features used
+ has some more requirements. If the Docker SDK for Python is installed, these
+ requirements are likely met (https://github.com/ansible-collections/community.docker/pull/414).
+ - docker_container_exec - no longer uses the Docker SDK for Python. It requires
+ ``requests`` to be installed, and depending on the features used has some
+ more requirements. If the Docker SDK for Python is installed, these requirements
+ are likely met (https://github.com/ansible-collections/community.docker/pull/401).
+ - docker_container_info - no longer uses the Docker SDK for Python. It requires
+ ``requests`` to be installed, and depending on the features used has some
+ more requirements. If the Docker SDK for Python is installed, these requirements
+ are likely met (https://github.com/ansible-collections/community.docker/pull/402).
+ - docker_containers inventory plugin - no longer uses the Docker SDK for Python.
+ It requires ``requests`` to be installed, and depending on the features used
+ has some more requirements. If the Docker SDK for Python is installed, these
+ requirements are likely met (https://github.com/ansible-collections/community.docker/pull/413).
+ - docker_host_info - no longer uses the Docker SDK for Python. It requires ``requests``
+ to be installed, and depending on the features used has some more requirements.
+ If the Docker SDK for Python is installed, these requirements are likely met
+ (https://github.com/ansible-collections/community.docker/pull/403).
+ - docker_image - no longer uses the Docker SDK for Python. It requires ``requests``
+ to be installed, and depending on the features used has some more requirements.
+ If the Docker SDK for Python is installed, these requirements are likely met
+ (https://github.com/ansible-collections/community.docker/pull/404).
+ - docker_image_info - no longer uses the Docker SDK for Python. It requires
+ ``requests`` to be installed, and depending on the features used has some
+ more requirements. If the Docker SDK for Python is installed, these requirements
+ are likely met (https://github.com/ansible-collections/community.docker/pull/405).
+ - docker_image_load - no longer uses the Docker SDK for Python. It requires
+ ``requests`` to be installed, and depending on the features used has some
+ more requirements. If the Docker SDK for Python is installed, these requirements
+ are likely met (https://github.com/ansible-collections/community.docker/pull/406).
+ - docker_login - no longer uses the Docker SDK for Python. It requires ``requests``
+ to be installed, and depending on the features used has some more requirements.
+ If the Docker SDK for Python is installed, these requirements are likely met
+ (https://github.com/ansible-collections/community.docker/pull/407).
+ - docker_network - no longer uses the Docker SDK for Python. It requires ``requests``
+ to be installed, and depending on the features used has some more requirements.
+ If the Docker SDK for Python is installed, these requirements are likely met
+ (https://github.com/ansible-collections/community.docker/pull/408).
+ - docker_network_info - no longer uses the Docker SDK for Python. It requires
+ ``requests`` to be installed, and depending on the features used has some
+ more requirements. If the Docker SDK for Python is installed, these requirements
+ are likely met (https://github.com/ansible-collections/community.docker/pull/409).
+ - docker_prune - no longer uses the Docker SDK for Python. It requires ``requests``
+ to be installed, and depending on the features used has some more requirements.
+ If the Docker SDK for Python is installed, these requirements are likely met
+ (https://github.com/ansible-collections/community.docker/pull/410).
+ - docker_volume - no longer uses the Docker SDK for Python. It requires ``requests``
+ to be installed, and depending on the features used has some more requirements.
+ If the Docker SDK for Python is installed, these requirements are likely met
+ (https://github.com/ansible-collections/community.docker/pull/411).
+ - docker_volume_info - no longer uses the Docker SDK for Python. It requires
+ ``requests`` to be installed, and depending on the features used has some
+ more requirements. If the Docker SDK for Python is installed, these requirements
+ are likely met (https://github.com/ansible-collections/community.docker/pull/412).
+ minor_changes:
+ - Remove vendored copy of ``distutils.version`` in favor of vendored copy included
+ with ansible-core 2.12+. For ansible-core 2.11, uses ``distutils.version``
+ for Python < 3.12. There is no support for ansible-core 2.11 with Python 3.12+
+ (https://github.com/ansible-collections/community.docker/pull/271).
+ - socket_handler and socket_helper module utils - improve Python forward compatibility,
+ create helper functions for file blocking/unblocking (https://github.com/ansible-collections/community.docker/pull/415).
+ release_summary: First alpha prerelease of community.docker 3.0.0. This version
+ has several breaking changes and features rewrites of several modules to directly
+ use the API using ``requests``, instead of using the Docker SDK for Python.
+ removed_features:
+ - Execution Environments built with community.docker no longer include docker-compose
+ < 2.0.0. If you need to use it with the ``docker_compose`` module, please
+ install that requirement manually (https://github.com/ansible-collections/community.docker/pull/400).
+ - Support for Ansible 2.9 and ansible-base 2.10 has been removed. If you need
+ support for Ansible 2.9 or ansible-base 2.10, please use community.docker
+ 2.x.y (https://github.com/ansible-collections/community.docker/pull/400).
+ - Support for Docker API versions 1.20 to 1.24 has been removed. If you need
+ support for these API versions, please use community.docker 2.x.y (https://github.com/ansible-collections/community.docker/pull/400).
+ - Support for Python 2.6 has been removed. If you need support for Python 2.6,
+ please use community.docker 2.x.y (https://github.com/ansible-collections/community.docker/pull/400).
+ - Various modules - the default of ``tls_hostname`` (``localhost``) has been
+ removed. If you want to continue using ``localhost``, you need to specify
+ it explicitly (https://github.com/ansible-collections/community.docker/pull/363).
+ - docker_container - the ``all`` value is no longer allowed in ``published_ports``.
+ Use ``publish_all_ports=true`` instead (https://github.com/ansible-collections/community.docker/pull/399).
+ - docker_container - the default of ``command_handling`` was changed from ``compatibility``
+ to ``correct``. Older versions were warning for every invocation of the module
+ when this would result in a change of behavior (https://github.com/ansible-collections/community.docker/pull/399).
+ - docker_stack - the return values ``out`` and ``err`` have been removed. Use
+ ``stdout`` and ``stderr`` instead (https://github.com/ansible-collections/community.docker/pull/363).
+ fragments:
+ - 271-distutils-vendor-removed.yml
+ - 3.0.0-a1.yml
+ - 363-deprecations.yml
+ - 398-docker-api.yml
+ - 399-deprecations.yml
+ - 400-deprecations.yml
+ - 401-docker_container_exec-docker-api.yml
+ - 402-docker-api.yml
+ - 403-docker-api.yml
+ - 404-docker-api.yml
+ - 405-docker-api.yml
+ - 406-docker-api.yml
+ - 407-docker-api.yml
+ - 408-docker-api.yml
+ - 409-docker-api.yml
+ - 410-docker-api.yml
+ - 411-docker-api.yml
+ - 412-docker-api.yml
+ - 413-docker-api.yml
+ - 414-docker-api.yml
+ - 415-socket-improvements.yml
+ release_date: '2022-07-07'
+ 3.0.0-a2:
+ changes:
+ breaking_changes:
+ - docker_container - ``exposed_ports`` is no longer ignored in ``comparisons``.
+ Before, its value was assumed to be identical with the value of ``published_ports``
+ (https://github.com/ansible-collections/community.docker/pull/422).
+ - docker_container - ``log_options`` can no longer be specified when ``log_driver``
+ is not specified (https://github.com/ansible-collections/community.docker/pull/422).
+ - docker_container - ``publish_all_ports`` is no longer ignored in ``comparisons``
+ (https://github.com/ansible-collections/community.docker/pull/422).
+ - docker_container - ``restart_retries`` can no longer be specified when ``restart_policy``
+ is not specified (https://github.com/ansible-collections/community.docker/pull/422).
+ - docker_container - ``stop_timeout`` is no longer ignored for idempotency if
+ told to be not ignored in ``comparisons``. So far it defaulted to ``ignore``
+ there, and setting it to ``strict`` had no effect (https://github.com/ansible-collections/community.docker/pull/422).
+ major_changes:
+ - docker_container - no longer uses the Docker SDK for Python. It requires ``requests``
+ to be installed, and depending on the features used has some more requirements.
+ If the Docker SDK for Python is installed, these requirements are likely met
+ (https://github.com/ansible-collections/community.docker/pull/422).
+ - docker_container - the module was completely rewritten from scratch (https://github.com/ansible-collections/community.docker/pull/422).
+ - docker_plugin - no longer uses the Docker SDK for Python. It requires ``requests``
+ to be installed, and depending on the features used has some more requirements.
+ If the Docker SDK for Python is installed, these requirements are likely met
+ (https://github.com/ansible-collections/community.docker/pull/429).
+ minor_changes:
+ - docker_container - add a new parameter ``image_comparison`` to control the
+ behavior for which image will be used for idempotency checks (https://github.com/ansible-collections/community.docker/issues/421,
+ https://github.com/ansible-collections/community.docker/pull/428).
+ - docker_container - add support for ``cgroupns_mode`` (https://github.com/ansible-collections/community.docker/issues/338,
+ https://github.com/ansible-collections/community.docker/pull/427).
+ - docker_container - allow to specify ``platform`` (https://github.com/ansible-collections/community.docker/issues/123,
+ https://github.com/ansible-collections/community.docker/pull/426).
+ release_summary: 'Second alpha prerelease of community.docker 3.0.0. This version
+ again has several breaking changes
+
+ and features rewrites of several modules to directly use the API using ``requests``,
+ instead of using
+
+ the Docker SDK for Python.
+
+
+ The largest change to the previous 3.0.0-a1 prerelease is that ``docker_container``
+ module has been
+
+ rewritten. It now also no longer needs the Docker SDK for Python, which allowed
+ to implement some new
+
+ features that were not available before (``platform`` and ``cgroupns_mode``
+ parameters).
+
+ '
+ fragments:
+ - 3.0.0-a2.yml
+ - 426-docker_container-platform.yml
+ - 427-docker_container-cgroupns_mode.yml
+ - 428-docker_container-image-ignore.yml
+ - 429-docker_plugin.yml
+ - docker_container.yml
+ release_date: '2022-07-15'
+ 3.0.0-a3:
+ changes:
+ minor_changes:
+ - All software licenses are now in the ``LICENSES/`` directory of the collection
+ root. Moreover, ``SPDX-License-Identifier:`` is used to declare the applicable
+ license for every file that is not automatically generated (https://github.com/ansible-collections/community.docker/pull/430).
+ release_summary: No content changes except some license declaration adjustments.
+ This is mainly a trial run to see whether this is causing unexpected problems.
+ fragments:
+ - 3.0.0-a3.yml
+ - 430-licenses.yml
+ release_date: '2022-07-23'
+ 3.0.0-rc1:
+ changes:
+ bugfixes:
+ - modules and plugins communicating directly with the Docker daemon - prevent
+ crash when TLS is used (https://github.com/ansible-collections/community.docker/pull/432).
+ release_summary: First release candidate for community.docker 3.0.0. As long
+ as more bugs are found new release candidates will be released.
+ fragments:
+ - 3.0.0-rc1.yml
+ - 432-tls.yml
+ release_date: '2022-07-26'
+ 3.0.0-rc2:
+ changes:
+ breaking_changes:
+ - modules and plugins communicating directly with the Docker daemon - when connecting
+ by SSH and not using ``use_ssh_client=true``, reject unknown host keys instead
+ of accepting them. This is only a breaking change relative to older community.docker
+ 3.0.0 pre-releases or with respect to Docker SDK for Python < 6.0.0. Docker
+ SDK for Python 6.0.0 will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+ bugfixes:
+ - docker_image - when composing the build context, trim trailing whitespace
+ from ``.dockerignore`` entries. This is only a change relative to older community.docker
+ 3.0.0 pre-releases or with respect to Docker SDK for Python < 6.0.0. Docker
+ SDK for Python 6.0.0 will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+ - modules and plugins communicating directly with the Docker daemon - do not
+ create a subshell for SSH connections when using ``use_ssh_client=true``.
+ This is only a change relative to older community.docker 3.0.0 pre-releases
+ or with respect to Docker SDK for Python < 6.0.0. Docker SDK for Python 6.0.0
+ will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+ - modules and plugins communicating directly with the Docker daemon - fix ``ProxyCommand``
+ handling for SSH connections when not using ``use_ssh_client=true``. This
+ is only a change relative to older community.docker 3.0.0 pre-releases or
+ with respect to Docker SDK for Python < 6.0.0. Docker SDK for Python 6.0.0
+ will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+ - modules and plugins communicating directly with the Docker daemon - fix parsing
+ of IPv6 addresses with a port in ``docker_host``. This is only a change relative
+ to older community.docker 3.0.0 pre-releases or with respect to Docker SDK
+ for Python < 6.0.0. Docker SDK for Python 6.0.0 will also include this change
+ (https://github.com/ansible-collections/community.docker/pull/434).
+ minor_changes:
+ - modules and plugins communicating directly with the Docker daemon - improve
+ default TLS version selection for Python 3.6 and newer. This is only a change
+ relative to older community.docker 3.0.0 pre-releases or with respect to Docker
+ SDK for Python < 6.0.0. Docker SDK for Python 6.0.0 will also include this
+ change (https://github.com/ansible-collections/community.docker/pull/434).
+ release_summary: Second release candidate for community.docker 3.0.0. As long
+ as more bugs are found new release candidates will be released.
+ security_fixes:
+ - modules and plugins communicating directly with the Docker daemon - when connecting
+ by SSH and not using ``use_ssh_client=true``, reject unknown host keys instead
+ of accepting them. This is only a change relative to older community.docker
+ 3.0.0 pre-releases or with respect to Docker SDK for Python < 6.0.0. Docker
+ SDK for Python 6.0.0 will also include this change (https://github.com/ansible-collections/community.docker/pull/434).
+ fragments:
+ - 3.0.0-rc2.yml
+ - docker-py-changes-1.yml
+ release_date: '2022-07-31'
+ 3.0.1:
+ changes:
+ bugfixes:
+ - docker_container - fix handling of ``env_file`` (https://github.com/ansible-collections/community.docker/issues/451,
+ https://github.com/ansible-collections/community.docker/pull/452).
+ release_summary: Bugfix release.
+ fragments:
+ - 3.0.1.yml
+ - 452-docker_container-env_file.yml
+ release_date: '2022-08-15'
+ 3.0.2:
+ changes:
+ bugfixes:
+ - docker_image - fix build argument handling (https://github.com/ansible-collections/community.docker/issues/455,
+ https://github.com/ansible-collections/community.docker/pull/456).
+ release_summary: Bugfix release.
+ fragments:
+ - 3.0.2.yml
+ - 456-docker_image-build-args.yml
+ release_date: '2022-08-16'
+ 3.1.0:
+ changes:
+ minor_changes:
+ - The collection repository conforms to the `REUSE specification <https://reuse.software/spec/>`__
+ except for the changelog fragments (https://github.com/ansible-collections/community.docker/pull/462).
+ - docker_swarm - allows usage of the ``data_path_port`` parameter when initializing
+ a swarm (https://github.com/ansible-collections/community.docker/issues/296).
+ release_summary: Feature release.
+ fragments:
+ - 3.1.0.yml
+ - 466-add-data-path-port.yml
+ - licenses.yml
+ release_date: '2022-09-08'
+ 3.2.0:
+ changes:
+ deprecated_features:
+ - 'docker_container - the ``ignore_image`` option is deprecated and will be
+ removed in community.docker 4.0.0. Use ``image: ignore`` in ``comparisons``
+ instead (https://github.com/ansible-collections/community.docker/pull/487).'
+ - 'docker_container - the ``purge_networks`` option is deprecated and will be
+ removed in community.docker 4.0.0. Use ``networks: strict`` in ``comparisons``
+ instead, and make sure to provide ``networks``, with value ``[]`` if all networks
+ should be removed (https://github.com/ansible-collections/community.docker/pull/487).'
+ minor_changes:
+ - docker_container - added ``image_name_mismatch`` option which allows to control
+ the behavior if the container uses the image specified, but the container's
+ configuration uses a different name for the image than the one provided to
+ the module (https://github.com/ansible-collections/community.docker/issues/485,
+ https://github.com/ansible-collections/community.docker/pull/488).
+ release_summary: Feature and deprecation release.
+ fragments:
+ - 3.2.0.yml
+ - 487-docker_container-deprecate.yml
+ - 488-docker_container-image-name.yml
+ release_date: '2022-11-01'
+ 3.2.1:
+ changes:
+ release_summary: Maintenance release with improved documentation.
+ fragments:
+ - 3.2.1.yml
+ release_date: '2022-11-06'
+ 3.2.2:
+ changes:
+ bugfixes:
+ - docker_container - the ``kill_signal`` option erroneously did not accept strings
+ anymore since 3.0.0 (https://github.com/ansible-collections/community.docker/issues/505,
+ https://github.com/ansible-collections/community.docker/pull/506).
+ release_summary: Bugfix release.
+ fragments:
+ - 3.2.2.yml
+ - 506-docker_container-kill_signal.yml
+ release_date: '2022-11-28'
+ 3.3.0:
+ changes:
+ bugfixes:
+ - docker_container_exec - fix ``chdir`` option which was ignored since community.docker
+ 3.0.0 (https://github.com/ansible-collections/community.docker/issues/517,
+ https://github.com/ansible-collections/community.docker/pull/518).
+ - vendored latest Docker SDK for Python bugfix (https://github.com/ansible-collections/community.docker/pull/513,
+ https://github.com/docker/docker-py/issues/3045).
+ minor_changes:
+ - current_container_facts - make work with current Docker version, also support
+ Podman (https://github.com/ansible-collections/community.docker/pull/510).
+ - docker_image - when using ``archive_path``, detect whether changes are necessary
+ based on the image ID (hash). If the existing tar archive matches the source,
+ do nothing. Previously, each task execution re-created the archive (https://github.com/ansible-collections/community.docker/pull/500).
+ release_summary: Feature and bugfix release.
+ fragments:
+ - 3.3.0.yml
+ - 500-idempotent-image-archival.yaml
+ - 510-current_container_facts.yml
+ - 513-api-npipe.yml
+ - 518-docker_container_exec-workdir.yml
+ release_date: '2022-12-03'
+ 3.3.1:
+ changes:
+ bugfixes:
+ - current_container_facts - make container detection work better in more cases
+ (https://github.com/ansible-collections/community.docker/pull/522).
+ release_summary: Bugfix release.
+ fragments:
+ - 3.3.1.yml
+ - 522-current-image.yml
+ release_date: '2022-12-06'
+ 3.3.2:
+ changes:
+ bugfixes:
+ - docker_container - when ``detach=false``, wait indefinitely and not at most
+ one minute. This was the behavior with Docker SDK for Python, and was accidentally
+ changed in 3.0.0 (https://github.com/ansible-collections/community.docker/issues/526,
+ https://github.com/ansible-collections/community.docker/pull/527).
+ release_summary: Bugfix release.
+ fragments:
+ - 3.3.2.yml
+ - 527-container-wait.yml
+ release_date: '2022-12-09'
+ 3.4.0:
+ changes:
+ bugfixes:
+ - docker_api connection plugin - fix error handling when 409 Conflict is returned
+ by the Docker daemon in case of a stopped container (https://github.com/ansible-collections/community.docker/pull/546).
+ - docker_container_exec - fix error handling when 409 Conflict is returned by
+ the Docker daemon in case of a stopped container (https://github.com/ansible-collections/community.docker/pull/546).
+ - docker_plugin - do not crash if plugin is installed in check mode (https://github.com/ansible-collections/community.docker/issues/552,
+ https://github.com/ansible-collections/community.docker/pull/553).
+ - most modules - fix handling of ``DOCKER_TIMEOUT`` environment variable, and
+ improve handling of other fallback environment variables (https://github.com/ansible-collections/community.docker/issues/551,
+ https://github.com/ansible-collections/community.docker/pull/554).
+ minor_changes:
+ - docker_api connection plugin - when copying files to/from a container, stream
+ the file contents instead of first reading them to memory (https://github.com/ansible-collections/community.docker/pull/545).
+ - docker_host_info - allow to list all containers with new option ``containers_all``
+ (https://github.com/ansible-collections/community.docker/issues/535, https://github.com/ansible-collections/community.docker/pull/538).
+ release_summary: Regular bugfix and feature release.
+ fragments:
+ - 3.4.0.yml
+ - 538-docker_host_info-all-containers.yml
+ - 545-docker_api.yml
+ - 546-conflict-error.yml
+ - 553-docker_plugin-check-mode.yml
+ - 554-env-vars.yml
+ modules:
+ - description: Copy a file into a Docker container
+ name: docker_container_copy_into
+ namespace: ''
+ release_date: '2023-01-14'
+ 3.4.1:
+ changes:
+ bugfixes:
+ - docker_api connection plugin, docker_container_exec, docker_container_copy_into
+ - properly close socket to Daemon after executing commands in containers (https://github.com/ansible-collections/community.docker/pull/582).
+    - docker_container - fix ``tmpfs_size`` and ``tmpfs_mode`` not being set (https://github.com/ansible-collections/community.docker/pull/580).
+ - various plugins and modules - remove unnecessary imports (https://github.com/ansible-collections/community.docker/pull/574).
+ release_summary: Regular bugfix release.
+ fragments:
+ - 3.4.1.yml
+ - 582-stream-close.yml
+ - fix-tmpfs_size-and-tmpfs_mode.yml
+ - remove-unneeded-imports.yml
+ release_date: '2023-02-20'
+ 3.4.2:
+ changes:
+ bugfixes:
+ - docker_prune - return correct value for ``changed``. So far the module always
+ claimed that nothing changed (https://github.com/ansible-collections/community.docker/pull/593).
+ release_summary: Bugfix release.
+ fragments:
+ - 3.4.2.yml
+ - 593-docker_prune-changed.yml
+ release_date: '2023-02-25'
diff --git a/ansible_collections/community/docker/changelogs/changelog.yaml.license b/ansible_collections/community/docker/changelogs/changelog.yaml.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/changelogs/changelog.yaml.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/changelogs/config.yaml b/ansible_collections/community/docker/changelogs/config.yaml
new file mode 100644
index 00000000..1c0c2d20
--- /dev/null
+++ b/ansible_collections/community/docker/changelogs/config.yaml
@@ -0,0 +1,34 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+changelog_filename_template: ../CHANGELOG.rst
+changelog_filename_version_depth: 0
+changes_file: changelog.yaml
+changes_format: combined
+keep_fragments: false
+mention_ancestor: true
+new_plugins_after_name: removed_features
+notesdir: fragments
+prelude_section_name: release_summary
+prelude_section_title: Release Summary
+sections:
+- - major_changes
+ - Major Changes
+- - minor_changes
+ - Minor Changes
+- - breaking_changes
+ - Breaking Changes / Porting Guide
+- - deprecated_features
+ - Deprecated Features
+- - removed_features
+ - Removed Features (previously deprecated)
+- - security_fixes
+ - Security Fixes
+- - bugfixes
+ - Bugfixes
+- - known_issues
+ - Known Issues
+title: Docker Community Collection
+trivial_section_name: trivial
diff --git a/ansible_collections/community/docker/changelogs/fragments/.keep b/ansible_collections/community/docker/changelogs/fragments/.keep
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/community/docker/changelogs/fragments/.keep
diff --git a/ansible_collections/community/docker/docs/docsite/extra-docs.yml b/ansible_collections/community/docker/docs/docsite/extra-docs.yml
new file mode 100644
index 00000000..c5047369
--- /dev/null
+++ b/ansible_collections/community/docker/docs/docsite/extra-docs.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+sections:
+ - title: Scenario Guide
+ toctree:
+ - scenario_guide
diff --git a/ansible_collections/community/docker/docs/docsite/links.yml b/ansible_collections/community/docker/docs/docsite/links.yml
new file mode 100644
index 00000000..8dbcfeca
--- /dev/null
+++ b/ansible_collections/community/docker/docs/docsite/links.yml
@@ -0,0 +1,27 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+edit_on_github:
+ repository: ansible-collections/community.docker
+ branch: main
+ path_prefix: ''
+
+extra_links:
+ - description: Submit a bug report
+ url: https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=bug_report.md
+ - description: Request a feature
+ url: https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=feature_request.md
+
+communication:
+ matrix_rooms:
+ - topic: General usage and support questions
+ room: '#users:ansible.im'
+ irc_channels:
+ - topic: General usage and support questions
+ network: Libera
+ channel: '#ansible'
+ mailing_lists:
+ - topic: Ansible Project List
+ url: https://groups.google.com/g/ansible-project
diff --git a/ansible_collections/community/docker/docs/docsite/rst/scenario_guide.rst b/ansible_collections/community/docker/docs/docsite/rst/scenario_guide.rst
new file mode 100644
index 00000000..e4b84431
--- /dev/null
+++ b/ansible_collections/community/docker/docs/docsite/rst/scenario_guide.rst
@@ -0,0 +1,232 @@
+..
+ Copyright (c) Ansible Project
+ GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+ SPDX-License-Identifier: GPL-3.0-or-later
+
+.. _ansible_collections.community.docker.docsite.scenario_guide:
+
+Docker Guide
+============
+
+The `community.docker collection <https://galaxy.ansible.com/community/docker>`_ offers several modules and plugins for orchestrating Docker containers and Docker Swarm.
+
+.. contents::
+ :local:
+ :depth: 1
+
+
+Requirements
+------------
+
+Most of the modules and plugins in community.docker require the `Docker SDK for Python <https://docker-py.readthedocs.io/en/stable/>`_. The SDK needs to be installed on the machines where the modules and plugins are executed, and for the Python version(s) with which the modules and plugins are executed. You can use the :ref:`community.general.python_requirements_info module <ansible_collections.community.general.python_requirements_info_module>` to make sure that the Docker SDK for Python is installed on the correct machine and for the Python version used by Ansible.
+
+Note that plugins (inventory plugins and connection plugins) are always executed in the context of Ansible itself. If you use a plugin that requires the Docker SDK for Python, you need to install it on the machine running ``ansible`` or ``ansible-playbook`` and for the same Python interpreter used by Ansible. To see which Python is used, run ``ansible --version``.
+
+You can install the Docker SDK for Python for Python 3.6 or later as follows:
+
+.. code-block:: bash
+
+ $ pip install docker
+
+For Python 2.7, you need to use a version between 2.0.0 and 4.4.4 since the Python package for Docker removed support for Python 2.7 on 5.0.0. You can install the specific version of the Docker SDK for Python as follows:
+
+.. code-block:: bash
+
+ $ pip install 'docker==4.4.4'
+
+Note that the Docker SDK for Python was called ``docker-py`` on PyPI before version 2.0.0. Please avoid installing this really old version, and make sure to not install both ``docker`` and ``docker-py``. Installing both will result in a broken installation. If this happens, Ansible will detect it and inform you about it. If that happens, you must uninstall both and reinstall the correct version. If in doubt, always install ``docker`` and never ``docker-py``.
+
+
+Connecting to the Docker API
+----------------------------
+
+You can connect to a local or remote API using parameters passed to each task or by setting environment variables. The order of precedence is command line parameters and then environment variables. If neither a command line option nor an environment variable is found, Ansible uses the default value provided under `Parameters`_.
+
+
+Parameters
+..........
+
+Most plugins and modules can be configured by the following parameters:
+
+ docker_host
+ The URL or Unix socket path used to connect to the Docker API. Defaults to ``unix://var/run/docker.sock``. To connect to a remote host, provide the TCP connection string (for example: ``tcp://192.0.2.23:2376``). If TLS is used to encrypt the connection to the API, then the module will automatically replace 'tcp' in the connection URL with 'https'.
+
+ api_version
+ The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported by the Docker SDK for Python installed.
+
+ timeout
+ The maximum amount of time in seconds to wait on a response from the API. Defaults to 60 seconds.
+
+ tls
+ Secure the connection to the API by using TLS without verifying the authenticity of the Docker host server. Defaults to ``false``.
+
+ validate_certs
+ Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. Default is ``false``.
+
+ cacert_path
+ Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+
+ cert_path
+ Path to the client's TLS certificate file.
+
+ key_path
+ Path to the client's TLS key file.
+
+ tls_hostname
+ When verifying the authenticity of the Docker Host server, provide the expected name of the server. Defaults to ``localhost``.
+
+ ssl_version
+ Provide a valid SSL version number. The default value is determined by the Docker SDK for Python.
+
+
+Environment variables
+.....................
+
+You can also control how the plugins and modules connect to the Docker API by setting the following environment variables.
+
+For plugins, they have to be set for the environment Ansible itself runs in. For modules, they have to be set for the environment the modules are executed in. For modules running on remote machines, the environment variables have to be set on that machine for the user used to execute the modules with.
+
+ DOCKER_HOST
+ The URL or Unix socket path used to connect to the Docker API.
+
+ DOCKER_API_VERSION
+ The version of the Docker API running on the Docker Host. Defaults to the latest version of the API supported
+ by Docker SDK for Python.
+
+ DOCKER_TIMEOUT
+ The maximum amount of time in seconds to wait on a response from the API.
+
+ DOCKER_CERT_PATH
+ Path to the directory containing the client certificate, client key and CA certificate.
+
+ DOCKER_SSL_VERSION
+ Provide a valid SSL version number.
+
+ DOCKER_TLS
+ Secure the connection to the API by using TLS without verifying the authenticity of the Docker Host.
+
+ DOCKER_TLS_VERIFY
+ Secure the connection to the API by using TLS and verify the authenticity of the Docker Host.
+
+
+Plain Docker daemon: images, networks, volumes, and containers
+--------------------------------------------------------------
+
+For working with a plain Docker daemon, that is without Swarm, there are connection plugins, an inventory plugin, and several modules available:
+
+ docker connection plugin
+ The :ref:`community.docker.docker connection plugin <ansible_collections.community.docker.docker_connection>` uses the Docker CLI utility to connect to Docker containers and execute modules in them. It essentially wraps ``docker exec`` and ``docker cp``. This connection plugin is supported by the :ref:`ansible.posix.synchronize module <ansible_collections.ansible.posix.synchronize_module>`.
+
+ docker_api connection plugin
+ The :ref:`community.docker.docker_api connection plugin <ansible_collections.community.docker.docker_api_connection>` talks directly to the Docker daemon to connect to Docker containers and execute modules in them.
+
+ docker_containers inventory plugin
+ The :ref:`community.docker.docker_containers inventory plugin <ansible_collections.community.docker.docker_containers_inventory>` allows you to dynamically add Docker containers from a Docker Daemon to your Ansible inventory. See :ref:`dynamic_inventory` for details on dynamic inventories.
+
+ The `docker inventory script <https://github.com/ansible-community/contrib-scripts/blob/main/inventory/docker.py>`_ is deprecated. Please use the inventory plugin instead. The inventory plugin has several compatibility options. If you need to collect Docker containers from multiple Docker daemons, you need to add every Docker daemon as an individual inventory source.
+
+ docker_host_info module
+ The :ref:`community.docker.docker_host_info module <ansible_collections.community.docker.docker_host_info_module>` allows you to retrieve information on a Docker daemon, such as all containers, images, volumes, networks and so on.
+
+ docker_login module
+ The :ref:`community.docker.docker_login module <ansible_collections.community.docker.docker_login_module>` allows you to log in and out of a remote registry, such as Docker Hub or a private registry. It provides similar functionality to the ``docker login`` and ``docker logout`` CLI commands.
+
+ docker_prune module
+ The :ref:`community.docker.docker_prune module <ansible_collections.community.docker.docker_prune_module>` allows you to prune no longer needed containers, images, volumes and so on. It provides similar functionality to the ``docker prune`` CLI command.
+
+ docker_image module
+ The :ref:`community.docker.docker_image module <ansible_collections.community.docker.docker_image_module>` provides full control over images, including: build, pull, push, tag and remove.
+
+ docker_image_info module
+ The :ref:`community.docker.docker_image_info module <ansible_collections.community.docker.docker_image_info_module>` allows you to list and inspect images.
+
+ docker_network module
+ The :ref:`community.docker.docker_network module <ansible_collections.community.docker.docker_network_module>` provides full control over Docker networks.
+
+ docker_network_info module
+ The :ref:`community.docker.docker_network_info module <ansible_collections.community.docker.docker_network_info_module>` allows you to inspect Docker networks.
+
+ docker_volume_info module
+    The :ref:`community.docker.docker_volume_info module <ansible_collections.community.docker.docker_volume_info_module>` allows you to inspect Docker volumes.
+
+ docker_volume module
+    The :ref:`community.docker.docker_volume module <ansible_collections.community.docker.docker_volume_module>` provides full control over Docker volumes.
+
+ docker_container module
+ The :ref:`community.docker.docker_container module <ansible_collections.community.docker.docker_container_module>` manages the container lifecycle by providing the ability to create, update, stop, start and destroy a Docker container.
+
+ docker_container_info module
+ The :ref:`community.docker.docker_container_info module <ansible_collections.community.docker.docker_container_info_module>` allows you to inspect a Docker container.
+
+
+Docker Compose
+--------------
+
+The :ref:`community.docker.docker_compose module <ansible_collections.community.docker.docker_compose_module>`
+allows you to use your existing Docker compose files to orchestrate containers on a single Docker daemon or on Swarm.
+Supports compose versions 1 and 2.
+
+Next to Docker SDK for Python, you need to install `docker-compose <https://github.com/docker/compose>`_ on the remote machines to use the module.
+
+
+Docker Machine
+--------------
+
+The :ref:`community.docker.docker_machine inventory plugin <ansible_collections.community.docker.docker_machine_inventory>` allows you to dynamically add Docker Machine hosts to your Ansible inventory.
+
+
+Docker stack
+------------
+
+The :ref:`community.docker.docker_stack module <ansible_collections.community.docker.docker_stack_module>` module allows you to control Docker stacks. Information on stacks can be retrieved by the :ref:`community.docker.docker_stack_info module <ansible_collections.community.docker.docker_stack_info_module>`, and information on stack tasks can be retrieved by the :ref:`community.docker.docker_stack_task_info module <ansible_collections.community.docker.docker_stack_task_info_module>`.
+
+
+Docker Swarm
+------------
+
+The community.docker collection provides multiple plugins and modules for managing Docker Swarms.
+
+Swarm management
+................
+
+One inventory plugin and several modules are provided to manage Docker Swarms:
+
+ docker_swarm inventory plugin
+ The :ref:`community.docker.docker_swarm inventory plugin <ansible_collections.community.docker.docker_swarm_inventory>` allows you to dynamically add all Docker Swarm nodes to your Ansible inventory.
+
+ docker_swarm module
+ The :ref:`community.docker.docker_swarm module <ansible_collections.community.docker.docker_swarm_module>` allows you to globally configure Docker Swarm manager nodes to join and leave swarms, and to change the Docker Swarm configuration.
+
+ docker_swarm_info module
+ The :ref:`community.docker.docker_swarm_info module <ansible_collections.community.docker.docker_swarm_info_module>` allows you to retrieve information on Docker Swarm.
+
+ docker_node module
+ The :ref:`community.docker.docker_node module <ansible_collections.community.docker.docker_node_module>` allows you to manage Docker Swarm nodes.
+
+ docker_node_info module
+ The :ref:`community.docker.docker_node_info module <ansible_collections.community.docker.docker_node_info_module>` allows you to retrieve information on Docker Swarm nodes.
+
+Configuration management
+........................
+
+The community.docker collection offers modules to manage Docker Swarm configurations and secrets:
+
+ docker_config module
+ The :ref:`community.docker.docker_config module <ansible_collections.community.docker.docker_config_module>` allows you to create and modify Docker Swarm configs.
+
+ docker_secret module
+ The :ref:`community.docker.docker_secret module <ansible_collections.community.docker.docker_secret_module>` allows you to create and modify Docker Swarm secrets.
+
+
+Swarm services
+..............
+
+Docker Swarm services can be created and updated with the :ref:`community.docker.docker_swarm_service module <ansible_collections.community.docker.docker_swarm_service_module>`, and information on them can be queried by the :ref:`community.docker.docker_swarm_service_info module <ansible_collections.community.docker.docker_swarm_service_info_module>`.
+
+
+Helpful links
+-------------
+
+Still using Dockerfile to build images? Check out `ansible-bender <https://github.com/ansible-community/ansible-bender>`_, and start building images from your Ansible playbooks.
+
+Use `Ansible Operator <https://learn.openshift.com/ansibleop/ansible-operator-overview/>`_ to launch your docker-compose file on `OpenShift <https://www.okd.io/>`_. Go from an app on your laptop to a fully scalable app in the cloud with Kubernetes in just a few moments.
diff --git a/ansible_collections/community/docker/meta/ee-bindep.txt b/ansible_collections/community/docker/meta/ee-bindep.txt
new file mode 100644
index 00000000..6ffd5ab0
--- /dev/null
+++ b/ansible_collections/community/docker/meta/ee-bindep.txt
@@ -0,0 +1,3 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/docker/meta/ee-requirements.txt b/ansible_collections/community/docker/meta/ee-requirements.txt
new file mode 100644
index 00000000..101b74b5
--- /dev/null
+++ b/ansible_collections/community/docker/meta/ee-requirements.txt
@@ -0,0 +1,14 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker
+requests
+paramiko
+
+# We assume that EEs are not based on Windows, and have Python >= 3.5.
+# (ansible-builder does not support conditionals, it will simply add
+# the following unconditionally to the requirements)
+#
+# pywin32 ; sys_platform == 'win32'
+# backports.ssl-match-hostname ; python_version < '3.5'
diff --git a/ansible_collections/community/docker/meta/execution-environment.yml b/ansible_collections/community/docker/meta/execution-environment.yml
new file mode 100644
index 00000000..9da98891
--- /dev/null
+++ b/ansible_collections/community/docker/meta/execution-environment.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: 1
+dependencies:
+ python: meta/ee-requirements.txt
+ system: meta/ee-bindep.txt
diff --git a/ansible_collections/community/docker/meta/runtime.yml b/ansible_collections/community/docker/meta/runtime.yml
new file mode 100644
index 00000000..7616e6fe
--- /dev/null
+++ b/ansible_collections/community/docker/meta/runtime.yml
@@ -0,0 +1,32 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+requires_ansible: '>=2.11.0'
+action_groups:
+ docker:
+ - docker_compose
+ - docker_config
+ - docker_container
+ - docker_container_copy_into
+ - docker_container_exec
+ - docker_container_info
+ - docker_host_info
+ - docker_image
+ - docker_image_info
+ - docker_image_load
+ - docker_login
+ - docker_network
+ - docker_network_info
+ - docker_node
+ - docker_node_info
+ - docker_plugin
+ - docker_prune
+ - docker_secret
+ - docker_swarm
+ - docker_swarm_info
+ - docker_swarm_service
+ - docker_swarm_service_info
+ - docker_volume
+ - docker_volume_info
diff --git a/ansible_collections/community/docker/plugins/action/docker_container_copy_into.py b/ansible_collections/community/docker/plugins/action/docker_container_copy_into.py
new file mode 100644
index 00000000..372cbd0a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/action/docker_container_copy_into.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import base64
+
+from ansible import constants as C
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
+from ansible_collections.community.docker.plugins.module_utils._scramble import unscramble
+
+
+class ActionModule(ActionBase):
+ # Set to True when transferring files to the remote
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ self._task.args['_max_file_size_for_diff'] = C.MAX_FILE_SIZE_FOR_DIFF
+
+ result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=self._task.async_val))
+
+ if u'diff' in result and result[u'diff'].get(u'scrambled_diff'):
+ # Scrambling is not done for security, but to avoid no_log screwing up the diff
+ diff = result[u'diff']
+ key = base64.b64decode(diff.pop(u'scrambled_diff'))
+ for k in (u'before', u'after'):
+ if k in diff:
+ diff[k] = unscramble(diff[k], key)
+
+ return result
diff --git a/ansible_collections/community/docker/plugins/connection/docker.py b/ansible_collections/community/docker/plugins/connection/docker.py
new file mode 100644
index 00000000..ba224929
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/connection/docker.py
@@ -0,0 +1,452 @@
+# Based on the chroot connection plugin by Maykel Moya
+#
+# (c) 2014, Lorin Hochstein
+# (c) 2015, Leendert Brouwer (https://github.com/objectified)
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author:
+ - Lorin Hochstein (!UNKNOWN)
+ - Leendert Brouwer (!UNKNOWN)
+name: docker
+short_description: Run tasks in docker containers
+description:
+ - Run commands or put/fetch files to an existing docker container.
+ - Uses the Docker CLI to execute commands in the container. If you prefer
+ to directly connect to the Docker daemon, use the
+ R(community.docker.docker_api,ansible_collections.community.docker.docker_api_connection)
+ connection plugin.
+options:
+ remote_addr:
+ description:
+ - The name of the container you want to access.
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_docker_host
+ remote_user:
+ description:
+ - The user to execute as inside the container.
+ - If Docker is too old to allow this (< 1.7), the one set by Docker itself will be used.
+ vars:
+ - name: ansible_user
+ - name: ansible_docker_user
+ ini:
+ - section: defaults
+ key: remote_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ cli:
+ - name: user
+ keyword:
+ - name: remote_user
+ docker_extra_args:
+ description:
+ - Extra arguments to pass to the docker command line.
+ default: ''
+ vars:
+ - name: ansible_docker_extra_args
+ ini:
+ - section: docker_connection
+ key: extra_cli_args
+ container_timeout:
+ default: 10
+ description:
+ - Controls how long we can wait for output from the container once execution has started.
+ env:
+ - name: ANSIBLE_TIMEOUT
+ - name: ANSIBLE_DOCKER_TIMEOUT
+ version_added: 2.2.0
+ ini:
+ - key: timeout
+ section: defaults
+ - key: timeout
+ section: docker_connection
+ version_added: 2.2.0
+ vars:
+ - name: ansible_docker_timeout
+ version_added: 2.2.0
+ cli:
+ - name: timeout
+ type: integer
+'''
+
+import fcntl
+import os
+import os.path
+import subprocess
+import re
+
+from ansible.compat import selectors
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local docker based connections '''
+
+ transport = 'community.docker.docker'
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ # Note: docker supports running as non-root in some configurations.
+ # (For instance, setting the UNIX socket file to be readable and
+ # writable by a specific UNIX group and then putting users into that
+ # group). Therefore we don't check that the user is root when using
+ # this connection. But if the user is getting a permission denied
+ # error it probably means that docker on their system is only
+ # configured to be connected to by root and they are not running as
+ # root.
+
+ self._docker_args = []
+ self._container_user_cache = {}
+ self._version = None
+
+ # Windows uses Powershell modules
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+
+ if 'docker_command' in kwargs:
+ self.docker_cmd = kwargs['docker_command']
+ else:
+ try:
+ self.docker_cmd = get_bin_path('docker')
+ except ValueError:
+ raise AnsibleError("docker command not found in PATH")
+
+ @staticmethod
+ def _sanitize_version(version):
+ version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
+ version = re.sub(u'^v', u'', version)
+ return version
+
+ def _old_docker_version(self):
+ cmd_args = self._docker_args
+
+ old_version_subcommand = ['version']
+
+ old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
+ p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd_output, err = p.communicate()
+
+ return old_docker_cmd, to_native(cmd_output), err, p.returncode
+
+ def _new_docker_version(self):
+ # no result yet, must be newer Docker version
+ cmd_args = self._docker_args
+
+ new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]
+
+ new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
+ p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd_output, err = p.communicate()
+ return new_docker_cmd, to_native(cmd_output), err, p.returncode
+
+ def _get_docker_version(self):
+
+ cmd, cmd_output, err, returncode = self._old_docker_version()
+ if returncode == 0:
+ for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
+ if line.startswith(u'Server version:'): # old docker versions
+ return self._sanitize_version(line.split()[2])
+
+ cmd, cmd_output, err, returncode = self._new_docker_version()
+ if returncode:
+ raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))
+
+ return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))
+
+ def _get_docker_remote_user(self):
+ """ Get the default user configured in the docker container """
+ container = self.get_option('remote_addr')
+ if container in self._container_user_cache:
+ return self._container_user_cache[container]
+ p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', container],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ out, err = p.communicate()
+ out = to_text(out, errors='surrogate_or_strict')
+
+ if p.returncode != 0:
+ display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
+ self._container_user_cache[container] = None
+ return None
+
+ # The default exec user is root, unless it was changed in the Dockerfile with USER
+ user = out.strip() or u'root'
+ self._container_user_cache[container] = user
+ return user
+
+ def _build_exec_cmd(self, cmd):
+ """ Build the local docker exec command to run cmd on remote_host
+
+ If remote_user is available and is supported by the docker
+ version we are using, it will be provided to docker exec.
+ """
+
+ local_cmd = [self.docker_cmd]
+
+ if self._docker_args:
+ local_cmd += self._docker_args
+
+ local_cmd += [b'exec']
+
+ if self.remote_user is not None:
+ local_cmd += [b'-u', self.remote_user]
+
+ # -i is needed to keep stdin open which allows pipelining to work
+ local_cmd += [b'-i', self.get_option('remote_addr')] + cmd
+
+ return local_cmd
+
+ def _set_docker_args(self):
+ # TODO: this is mostly for backwards compatibility, play_context is used as fallback for older versions
+ # docker arguments
+ del self._docker_args[:]
+ extra_args = self.get_option('docker_extra_args') or getattr(self._play_context, 'docker_extra_args', '')
+ if extra_args:
+ self._docker_args += extra_args.split(' ')
+
+ def _set_conn_data(self):
+
+ ''' initialize for the connection, cannot do only in init since all data is not ready at that point '''
+
+ self._set_docker_args()
+
+ self.remote_user = self.get_option('remote_user')
+ if self.remote_user is None and self._play_context.remote_user is not None:
+ self.remote_user = self._play_context.remote_user
+
+ # timeout, use unless default and pc is different, backwards compat
+ self.timeout = self.get_option('container_timeout')
+ if self.timeout == 10 and self.timeout != self._play_context.timeout:
+ self.timeout = self._play_context.timeout
+
+ @property
+ def docker_version(self):
+
+ if not self._version:
+ self._set_docker_args()
+
+ self._version = self._get_docker_version()
+ if self._version == u'dev':
+ display.warning(u'Docker version number is "dev". Will assume latest version.')
+ if self._version != u'dev' and LooseVersion(self._version) < LooseVersion(u'1.3'):
+ raise AnsibleError('docker connection type requires docker 1.3 or higher')
+ return self._version
+
+ def _get_actual_user(self):
+ if self.remote_user is not None:
+ # An explicit user is provided
+ if self.docker_version == u'dev' or LooseVersion(self.docker_version) >= LooseVersion(u'1.7'):
+ # Support for specifying the exec user was added in docker 1.7
+ return self.remote_user
+ else:
+ self.remote_user = None
+ actual_user = self._get_docker_remote_user()
+ if actual_user != self.get_option('remote_user'):
+ display.warning(u'docker {0} does not support remote_user, using container default: {1}'
+ .format(self.docker_version, self.actual_user or u'?'))
+ return actual_user
+ elif self._display.verbosity > 2:
+ # Since we're not setting the actual_user, look it up so we have it for logging later
+ # Only do this if display verbosity is high enough that we'll need the value
+ # This saves overhead from calling into docker when we don't need to.
+ return self._get_docker_remote_user()
+ else:
+ return None
+
+ def _connect(self, port=None):
+ """ Connect to the container. Nothing to do """
+ super(Connection, self)._connect()
+ if not self._connected:
+ self._set_conn_data()
+ actual_user = self._get_actual_user()
+ display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
+ actual_user or u'?'), host=self.get_option('remote_addr')
+ )
+ self._connected = True
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ Run a command on the docker host """
+
+ self._set_conn_data()
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
+
+ display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self.get_option('remote_addr'))
+ display.debug("opening command with Popen()")
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ p = subprocess.Popen(
+ local_cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ display.debug("done running command with Popen()")
+
+ if self.become and self.become.expect_prompt() and sudoable:
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)
+
+ become_output = b''
+ try:
+ while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
+ events = selector.select(self.timeout)
+ if not events:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
+
+ for key, event in events:
+ if key.fileobj == p.stdout:
+ chunk = p.stdout.read()
+ elif key.fileobj == p.stderr:
+ chunk = p.stderr.read()
+
+ if not chunk:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
+ become_output += chunk
+ finally:
+ selector.close()
+
+ if not self.become.check_success(become_output):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+ display.debug("getting output with communicate()")
+ stdout, stderr = p.communicate(in_data)
+ display.debug("done communicating")
+
+ display.debug("done with docker.exec_command()")
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ return ntpath.normpath(remote_path)
+ else:
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ """ Transfer a file from local to docker container """
+ self._set_conn_data()
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+
+ out_path = self._prefix_login_path(out_path)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound(
+ "file or module does not exist: %s" % to_native(in_path))
+
+ out_path = shlex_quote(out_path)
+ # Older docker doesn't have native support for copying files into
+ # running containers, so we use docker exec to implement this
+ # Although docker version 1.8 and later provide support, the
+ # owner and group of the files are always set to root
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ try:
+ p = subprocess.Popen(args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except OSError:
+ raise AnsibleError("docker connection requires dd command in the container to put files")
+ stdout, stderr = p.communicate()
+
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
+ (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
+
+ def fetch_file(self, in_path, out_path):
+ """ Fetch a file from container to local. """
+ self._set_conn_data()
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+
+ in_path = self._prefix_login_path(in_path)
+ # out_path is the final file path, but docker takes a directory, not a
+ # file path
+ out_dir = os.path.dirname(out_path)
+
+ args = [self.docker_cmd, "cp", "%s:%s" % (self.get_option('remote_addr'), in_path), out_dir]
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+
+ p = subprocess.Popen(args, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
+ else:
+ actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
+
+ if p.returncode != 0:
+ # Older docker doesn't have native support for fetching files command `cp`
+ # If `cp` fails, try to use `dd` instead
+ args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
+ try:
+ p = subprocess.Popen(args, stdin=subprocess.PIPE,
+ stdout=out_file, stderr=subprocess.PIPE)
+ except OSError:
+ raise AnsibleError("docker connection requires dd command in the container to put files")
+ stdout, stderr = p.communicate()
+
+ if p.returncode != 0:
+ raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ # Rename if needed
+ if actual_out_path != out_path:
+ os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
+
+ def close(self):
+ """ Terminate the connection. Nothing to do for Docker"""
+ super(Connection, self).close()
+ self._connected = False
+
+ def reset(self):
+ # Clear container user cache
+ self._container_user_cache = {}
diff --git a/ansible_collections/community/docker/plugins/connection/docker_api.py b/ansible_collections/community/docker/plugins/connection/docker_api.py
new file mode 100644
index 00000000..bbc13989
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/connection/docker_api.py
@@ -0,0 +1,338 @@
+# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author:
+ - Felix Fontein (@felixfontein)
+name: docker_api
+short_description: Run tasks in docker containers
+version_added: 1.1.0
+description:
+ - Run commands or put/fetch files to an existing docker container.
+ - Uses the L(requests library,https://pypi.org/project/requests/) to interact
+ directly with the Docker daemon instead of using the Docker CLI. Use the
+ R(community.docker.docker,ansible_collections.community.docker.docker_connection)
+ connection plugin if you want to use the Docker CLI.
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.docker.var_names
+options:
+ remote_user:
+ type: str
+ description:
+ - The user to execute as inside the container.
+ vars:
+ - name: ansible_user
+ - name: ansible_docker_user
+ ini:
+ - section: defaults
+ key: remote_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ cli:
+ - name: user
+ keyword:
+ - name: remote_user
+ remote_addr:
+ type: str
+ description:
+ - The name of the container you want to access.
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_docker_host
+ container_timeout:
+ default: 10
+ description:
+ - Controls how long we can wait for output from the container once execution has started.
+ env:
+ - name: ANSIBLE_TIMEOUT
+ - name: ANSIBLE_DOCKER_TIMEOUT
+ version_added: 2.2.0
+ ini:
+ - key: timeout
+ section: defaults
+ - key: timeout
+ section: docker_connection
+ version_added: 2.2.0
+ vars:
+ - name: ansible_docker_timeout
+ version_added: 2.2.0
+ cli:
+ - name: timeout
+ type: integer
+'''
+
+import os
+import os.path
+
+from ansible.errors import AnsibleFileNotFound, AnsibleConnectionFailure
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.copy import (
+ DockerFileCopyError,
+ DockerFileNotFound,
+ fetch_file,
+ put_file,
+)
+
+from ansible_collections.community.docker.plugins.plugin_utils.socket_handler import (
+ DockerSocketHandler,
+)
+from ansible_collections.community.docker.plugins.plugin_utils.common_api import (
+ AnsibleDockerClient,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound
+
+MIN_DOCKER_API = None
+
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local docker based connections '''
+
+ transport = 'community.docker.docker_api'
+ has_pipelining = True
+
+ def _call_client(self, callable, not_found_can_be_resource=False):
+ try:
+ return callable()
+ except NotFound as e:
+ if not_found_can_be_resource:
+ raise AnsibleConnectionFailure('Could not find container "{1}" or resource in it ({0})'.format(e, self.get_option('remote_addr')))
+ else:
+ raise AnsibleConnectionFailure('Could not find container "{1}" ({0})'.format(e, self.get_option('remote_addr')))
+ except APIError as e:
+ if e.response is not None and e.response.status_code == 409:
+ raise AnsibleConnectionFailure('The container "{1}" has been paused ({0})'.format(e, self.get_option('remote_addr')))
+ self.client.fail(
+ 'An unexpected Docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
+ )
+ except DockerException as e:
+ self.client.fail(
+ 'An unexpected Docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
+ )
+ except RequestException as e:
+ self.client.fail(
+ 'An unexpected requests error occurred for container "{1}" when trying to talk to the Docker daemon: {0}'
+ .format(e, self.get_option('remote_addr'))
+ )
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.client = None
+ self.ids = dict()
+
+ # Windows uses Powershell modules
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+
+ self.actual_user = None
+
+ def _connect(self, port=None):
+ """ Connect to the container. Nothing to do """
+ super(Connection, self)._connect()
+ if not self._connected:
+ self.actual_user = self.get_option('remote_user')
+ display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
+ self.actual_user or u'?'), host=self.get_option('remote_addr')
+ )
+ if self.client is None:
+ self.client = AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)
+ self._connected = True
+
+ if self.actual_user is None and display.verbosity > 2:
+ # Since we're not setting the actual_user, look it up so we have it for logging later
+ # Only do this if display verbosity is high enough that we'll need the value
+ # This saves overhead from calling into docker when we don't need to
+ display.vvv(u"Trying to determine actual user")
+ result = self._call_client(lambda: self.client.get_json('/containers/{0}/json', self.get_option('remote_addr')))
+ if result.get('Config'):
+ self.actual_user = result['Config'].get('User')
+ if self.actual_user is not None:
+ display.vvv(u"Actual user is '{0}'".format(self.actual_user))
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ Run a command on the docker host """
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ command = [self._play_context.executable, '-c', to_text(cmd)]
+
+ do_become = self.become and self.become.expect_prompt() and sudoable
+
+ display.vvv(
+ u"EXEC {0}{1}{2}".format(
+ to_text(command),
+ ', with stdin ({0} bytes)'.format(len(in_data)) if in_data is not None else '',
+ ', with become prompt' if do_become else '',
+ ),
+ host=self.get_option('remote_addr')
+ )
+
+ need_stdin = True if (in_data is not None) or do_become else False
+
+ data = {
+ 'Container': self.get_option('remote_addr'),
+ 'User': self.get_option('remote_user') or '',
+ 'Privileged': False,
+ 'Tty': False,
+ 'AttachStdin': need_stdin,
+ 'AttachStdout': True,
+ 'AttachStderr': True,
+ 'Cmd': command,
+ }
+
+ if 'detachKeys' in self.client._general_configs:
+ data['detachKeys'] = self.client._general_configs['detachKeys']
+
+ exec_data = self._call_client(lambda: self.client.post_json_to_json('/containers/{0}/exec', self.get_option('remote_addr'), data=data))
+ exec_id = exec_data['Id']
+
+ data = {
+ 'Tty': False,
+ 'Detach': False
+ }
+ if need_stdin:
+ exec_socket = self._call_client(lambda: self.client.post_json_to_stream_socket('/exec/{0}/start', exec_id, data=data))
+ try:
+ with DockerSocketHandler(display, exec_socket, container=self.get_option('remote_addr')) as exec_socket_handler:
+ if do_become:
+ become_output = [b'']
+
+ def append_become_output(stream_id, data):
+ become_output[0] += data
+
+ exec_socket_handler.set_block_done_callback(append_become_output)
+
+ while not self.become.check_success(become_output[0]) and not self.become.check_password_prompt(become_output[0]):
+ if not exec_socket_handler.select(self.get_option('container_timeout')):
+ stdout, stderr = exec_socket_handler.consume()
+ raise AnsibleConnectionFailure('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output[0]))
+
+ if exec_socket_handler.is_eof():
+ raise AnsibleConnectionFailure('privilege output closed while waiting for password prompt:\n' + to_native(become_output[0]))
+
+ if not self.become.check_success(become_output[0]):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ exec_socket_handler.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+
+ if in_data is not None:
+ exec_socket_handler.write(in_data)
+
+ stdout, stderr = exec_socket_handler.consume()
+ finally:
+ exec_socket.close()
+ else:
+ stdout, stderr = self._call_client(lambda: self.client.post_json_to_stream(
+ '/exec/{0}/start', exec_id, stream=False, demux=True, tty=False, data=data))
+
+ result = self._call_client(lambda: self.client.get_json('/exec/{0}/json', exec_id))
+
+ return result.get('ExitCode') or 0, stdout or b'', stderr or b''
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ return ntpath.normpath(remote_path)
+ else:
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ """ Transfer a file from local to docker container """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+
+ out_path = self._prefix_login_path(out_path)
+
+ if self.actual_user not in self.ids:
+ dummy, ids, dummy = self.exec_command(b'id -u && id -g')
+ try:
+ user_id, group_id = ids.splitlines()
+ self.ids[self.actual_user] = int(user_id), int(group_id)
+ display.vvvv(
+ 'PUT: Determined uid={0} and gid={1} for user "{2}"'.format(user_id, group_id, self.actual_user),
+ host=self.get_option('remote_addr')
+ )
+ except Exception as e:
+ raise AnsibleConnectionFailure(
+ 'Error while determining user and group ID of current user in container "{1}": {0}\nGot value: {2!r}'
+ .format(e, self.get_option('remote_addr'), ids)
+ )
+
+ user_id, group_id = self.ids[self.actual_user]
+ try:
+ self._call_client(
+ lambda: put_file(
+ self.client,
+ container=self.get_option('remote_addr'),
+ in_path=in_path,
+ out_path=out_path,
+ user_id=user_id,
+ group_id=group_id,
+ user_name=self.actual_user,
+ follow_links=True,
+ ),
+ not_found_can_be_resource=True,
+ )
+ except DockerFileNotFound as exc:
+ raise AnsibleFileNotFound(to_native(exc))
+ except DockerFileCopyError as exc:
+ raise AnsibleConnectionFailure(to_native(exc))
+
+ def fetch_file(self, in_path, out_path):
+ """ Fetch a file from container to local. """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+
+ in_path = self._prefix_login_path(in_path)
+
+ try:
+ self._call_client(
+ lambda: fetch_file(
+ self.client,
+ container=self.get_option('remote_addr'),
+ in_path=in_path,
+ out_path=out_path,
+ follow_links=True,
+ log=lambda msg: display.vvvv(msg, host=self.get_option('remote_addr')),
+ ),
+ not_found_can_be_resource=True,
+ )
+ except DockerFileNotFound as exc:
+ raise AnsibleFileNotFound(to_native(exc))
+ except DockerFileCopyError as exc:
+ raise AnsibleConnectionFailure(to_native(exc))
+
+ def close(self):
+ """ Terminate the connection. Nothing to do for Docker"""
+ super(Connection, self).close()
+ self._connected = False
+
+ def reset(self):
+ self.ids.clear()
diff --git a/ansible_collections/community/docker/plugins/connection/nsenter.py b/ansible_collections/community/docker/plugins/connection/nsenter.py
new file mode 100644
index 00000000..fff36afb
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/connection/nsenter.py
@@ -0,0 +1,239 @@
+# Copyright (c) 2021 Jeff Goldschrafe <jeff@holyhandgrenade.org>
+# Based on Ansible local connection plugin by:
+# Copyright (c) 2012 Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: nsenter
+short_description: execute on host running controller container
+version_added: 1.9.0
+description:
+ - This connection plugin allows Ansible, running in a privileged container, to execute tasks on the container
+ host instead of in the container itself.
+ - This is useful for running Ansible in a pull model, while still keeping the Ansible control node
+ containerized.
+ - It relies on having privileged access to run C(nsenter) in the host's PID namespace, allowing it to enter the
+ namespaces of the provided PID (default PID 1, or init/systemd).
+author: Jeff Goldschrafe (@jgoldschrafe)
+options:
+ nsenter_pid:
+ description:
+ - PID to attach with using nsenter.
+ - The default should be fine unless you are attaching as a non-root user.
+ type: int
+ default: 1
+ vars:
+ - name: ansible_nsenter_pid
+ env:
+ - name: ANSIBLE_NSENTER_PID
+ ini:
+ - section: nsenter_connection
+ key: nsenter_pid
+notes:
+ - The remote user is ignored; this plugin always runs as root.
+ - >-
+ This plugin requires the Ansible controller container to be launched in the following way:
+ (1) The container image contains the C(nsenter) program;
+ (2) The container is launched in privileged mode;
+ (3) The container is launched in the host's PID namespace (C(--pid host)).
+'''
+
+import os
+import pty
+import subprocess
+import fcntl
+
+import ansible.constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils.compat import selectors
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+from ansible.utils.path import unfrackpath
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ '''Connections to a container host using nsenter
+ '''
+
+ transport = 'community.docker.nsenter'
+ has_pipelining = False
+
+ def __init__(self, *args, **kwargs):
+ super(Connection, self).__init__(*args, **kwargs)
+ self.cwd = None
+
+ def _connect(self):
+ self._nsenter_pid = self.get_option("nsenter_pid")
+
+ # Because nsenter requires very high privileges, our remote user
+ # is always assumed to be root.
+ self._play_context.remote_user = "root"
+
+ if not self._connected:
+ display.vvv(
+ u"ESTABLISH NSENTER CONNECTION FOR USER: {0}".format(
+ self._play_context.remote_user
+ ),
+ host=self._play_context.remote_addr,
+ )
+ self._connected = True
+ return self
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ display.debug("in nsenter.exec_command()")
+
+ executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
+
+ if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
+ raise AnsibleError("failed to find the executable specified %s."
+ " Please verify if the executable exists and re-try." % executable)
+
+ # Rewrite the provided command to prefix it with nsenter
+ nsenter_cmd_parts = [
+ "nsenter",
+ "--ipc",
+ "--mount",
+ "--net",
+ "--pid",
+ "--uts",
+ "--preserve-credentials",
+ "--target={0}".format(self._nsenter_pid),
+ "--",
+ ]
+
+ if isinstance(cmd, (text_type, binary_type)):
+ cmd_parts = nsenter_cmd_parts + [cmd]
+ cmd = to_bytes(" ".join(cmd_parts))
+ else:
+ cmd_parts = nsenter_cmd_parts + cmd
+ cmd = [to_bytes(arg) for arg in cmd_parts]
+
+ display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr)
+ display.debug("opening command with Popen()")
+
+ master = None
+ stdin = subprocess.PIPE
+
+ # This plugin does not support pipelining. This diverges from the behavior of
+ # the core "local" connection plugin that this one derives from.
+ if sudoable and self.become and self.become.expect_prompt():
+            # Create a pty if sudoable for privilege escalation that needs it.
+ # Falls back to using a standard pipe if this fails, which may
+ # cause the command to fail in certain situations where we are escalating
+ # privileges or the command otherwise needs a pty.
+ try:
+ master, stdin = pty.openpty()
+ except (IOError, OSError) as e:
+ display.debug("Unable to open pty: %s" % to_native(e))
+
+ p = subprocess.Popen(
+ cmd,
+ shell=isinstance(cmd, (text_type, binary_type)),
+ executable=executable if isinstance(cmd, (text_type, binary_type)) else None,
+ cwd=self.cwd,
+ stdin=stdin,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+
+ # if we created a master, we can close the other half of the pty now, otherwise master is stdin
+ if master is not None:
+ os.close(stdin)
+
+ display.debug("done running command with Popen()")
+
+ if self.become and self.become.expect_prompt() and sudoable:
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)
+
+ become_output = b''
+ try:
+ while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
+ events = selector.select(self._play_context.timeout)
+ if not events:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
+
+ for key, event in events:
+ if key.fileobj == p.stdout:
+ chunk = p.stdout.read()
+ elif key.fileobj == p.stderr:
+ chunk = p.stderr.read()
+
+ if not chunk:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
+ become_output += chunk
+ finally:
+ selector.close()
+
+ if not self.become.check_success(become_output):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ if master is None:
+ p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ else:
+ os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+ display.debug("getting output with communicate()")
+ stdout, stderr = p.communicate(in_data)
+ display.debug("done communicating")
+
+ # finally, close the other half of the pty, if it was created
+ if master:
+ os.close(master)
+
+ display.debug("done with nsenter.exec_command()")
+ return (p.returncode, stdout, stderr)
+
+ def put_file(self, in_path, out_path):
+ super(Connection, self).put_file(in_path, out_path)
+
+ in_path = unfrackpath(in_path, basedir=self.cwd)
+ out_path = unfrackpath(out_path, basedir=self.cwd)
+
+ display.vvv(u"PUT {0} to {1}".format(in_path, out_path), host=self._play_context.remote_addr)
+ try:
+ with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
+ in_data = in_file.read()
+ rc, out, err = self.exec_command(cmd=["tee", out_path], in_data=in_data)
+ if rc != 0:
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, err))
+ except IOError as e:
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, to_native(e)))
+
+ def fetch_file(self, in_path, out_path):
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ in_path = unfrackpath(in_path, basedir=self.cwd)
+ out_path = unfrackpath(out_path, basedir=self.cwd)
+
+ try:
+ rc, out, err = self.exec_command(cmd=["cat", in_path])
+ display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
+ if rc != 0:
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(in_path, err))
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as out_file:
+ out_file.write(out)
+ except IOError as e:
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e)))
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ self._connected = False
diff --git a/ansible_collections/community/docker/plugins/doc_fragments/attributes.py b/ansible_collections/community/docker/plugins/doc_fragments/attributes.py
new file mode 100644
index 00000000..00b3319f
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/doc_fragments/attributes.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options: {}
+attributes:
+ check_mode:
+ description: Can run in C(check_mode) and return changed status prediction without modifying target.
+ diff_mode:
+ description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
+'''
+
+ # Should be used together with the standard fragment
+ INFO_MODULE = r'''
+options: {}
+attributes:
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+'''
+
+ ACTIONGROUP_DOCKER = r'''
+options: {}
+attributes:
+ action_group:
+ description: Use C(group/docker) or C(group/community.docker.docker) in C(module_defaults) to set defaults for this module.
+ support: full
+ membership:
+ - community.docker.docker
+ - docker
+'''
+
+ CONN = r'''
+options: {}
+attributes:
+ become:
+ description: Is usable alongside C(become) keywords.
+ connection:
+ description: Uses the target's configured connection information to execute code on it.
+ delegation:
+ description: Can be used in conjunction with C(delegate_to) and related keywords.
+'''
+
+ FACTS = r'''
+options: {}
+attributes:
+ facts:
+ description: Action returns an C(ansible_facts) dictionary that will update existing host facts.
+'''
+
+ # Should be used together with the standard fragment and the FACTS fragment
+ FACTS_MODULE = r'''
+options: {}
+attributes:
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+ facts:
+ support: full
+'''
+
+ FILES = r'''
+options: {}
+attributes:
+ safe_file_operations:
+ description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
+'''
+
+ FLOW = r'''
+options: {}
+attributes:
+ action:
+ description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+ async:
+ description: Supports being used with the C(async) keyword.
+'''
diff --git a/ansible_collections/community/docker/plugins/doc_fragments/docker.py b/ansible_collections/community/docker/plugins/doc_fragments/docker.py
new file mode 100644
index 00000000..4c537850
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/doc_fragments/docker.py
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Docker doc fragment
+ DOCUMENTATION = r'''
+options:
+ docker_host:
+ description:
+ - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
+ TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
+ the module will automatically replace C(tcp) in the connection URL with C(https).
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: unix://var/run/docker.sock
+ aliases: [ docker_url ]
+ tls_hostname:
+ description:
+ - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
+ be used instead. If the environment variable is not set, the default value will be used.
+ - Note that this option had a default value C(localhost) in older versions. It was removed in community.docker 3.0.0.
+ type: str
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: auto
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ ca_cert:
+ description:
+ - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description:
+ - Path to the client's TLS certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ client_key:
+ description:
+ - Path to the client's TLS key file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ssl_version:
+ description:
+ - Provide a valid SSL version number. Default value determined by ssl.py module.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
+ used instead.
+ type: str
+ tls:
+ description:
+ - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
+ server. Note that if I(validate_certs) is set to C(true) as well, it will take precedence.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: false
+ use_ssh_client:
+ description:
+ - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
+ - Requires Docker SDK for Python 4.4.0 or newer.
+ type: bool
+ default: false
+ version_added: 1.5.0
+ validate_certs:
+ description:
+ - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: false
+ aliases: [ tls_verify ]
+ debug:
+ description:
+ - Debug mode
+ type: bool
+ default: false
+
+notes:
+ - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
+ You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
+ C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
+ with the product that sets up the environment. It will set these variables for you. See
+ U(https://docs.docker.com/machine/reference/env/) for more details.
+ - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
+ For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
+  - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
+ In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
+ and use C($DOCKER_CONFIG/config.json) otherwise.
+'''
+
+ # For plugins: allow to define common options with Ansible variables
+
+ VAR_NAMES = r'''
+options:
+ docker_host:
+ vars:
+ - name: ansible_docker_docker_host
+ tls_hostname:
+ vars:
+ - name: ansible_docker_tls_hostname
+ api_version:
+ vars:
+ - name: ansible_docker_api_version
+ timeout:
+ vars:
+ - name: ansible_docker_timeout
+ ca_cert:
+ vars:
+ - name: ansible_docker_ca_cert
+ client_cert:
+ vars:
+ - name: ansible_docker_client_cert
+ client_key:
+ vars:
+ - name: ansible_docker_client_key
+ ssl_version:
+ vars:
+ - name: ansible_docker_ssl_version
+ tls:
+ vars:
+ - name: ansible_docker_tls
+ validate_certs:
+ vars:
+ - name: ansible_docker_validate_certs
+'''
+
+ # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
+
+ DOCKER_PY_1_DOCUMENTATION = r'''
+options: {}
+notes:
+ - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
+ communicate with the Docker daemon.
+requirements:
+ - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+ Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+ (see L(here,https://github.com/docker/docker-py/issues/1310) for details). Note that both
+ modules should *not* be installed at the same time. Also note that when both modules are
+ installed and one of them is uninstalled, the other might no longer function and a reinstall
+ of it is required."
+'''
+
+ # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
+ # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
+
+ DOCKER_PY_2_DOCUMENTATION = r'''
+options: {}
+notes:
+ - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
+ communicate with the Docker daemon.
+requirements:
+ - "Python >= 2.7"
+ - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+ Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+ (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+ This module does *not* work with docker-py."
+'''
+
+ # Docker doc fragment when using the vendored API access code
+ API_DOCUMENTATION = r'''
+options:
+ docker_host:
+ description:
+ - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
+ TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
+ the module will automatically replace C(tcp) in the connection URL with C(https).
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: unix://var/run/docker.sock
+ aliases: [ docker_url ]
+ tls_hostname:
+ description:
+ - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
+ be used instead. If the environment variable is not set, the default value will be used.
+ - Note that this option had a default value C(localhost) in older versions. It was removed in community.docker 3.0.0.
+ type: str
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by this collection and the docker daemon.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: auto
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ ca_cert:
+ description:
+ - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description:
+ - Path to the client's TLS certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ client_key:
+ description:
+ - Path to the client's TLS key file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ssl_version:
+ description:
+ - Provide a valid SSL version number. Default value determined by ssl.py module.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
+ used instead.
+ type: str
+ tls:
+ description:
+ - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
+ server. Note that if I(validate_certs) is set to C(true) as well, it will take precedence.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: false
+ use_ssh_client:
+ description:
+ - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
+ type: bool
+ default: false
+ version_added: 1.5.0
+ validate_certs:
+ description:
+ - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: false
+ aliases: [ tls_verify ]
+ debug:
+ description:
+ - Debug mode
+ type: bool
+ default: false
+
+notes:
+ - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
+ You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
+ C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
+ with the product that sets up the environment. It will set these variables for you. See
+ U(https://docs.docker.com/machine/reference/env/) for more details.
+# - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
+# For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
+# - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
+# In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
+# and use C($DOCKER_CONFIG/config.json) otherwise.
+ - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
+    communicate with the Docker daemon. It uses code derived from the Docker SDK for Python that is included in this
+ collection.
+requirements:
+ - requests
+  - pywin32 (when using named pipes on Windows)
+ - paramiko (when using SSH with I(use_ssh_client=false))
+ - pyOpenSSL (when using TLS)
+ - backports.ssl_match_hostname (when using TLS on Python 2)
+'''
diff --git a/ansible_collections/community/docker/plugins/inventory/docker_containers.py b/ansible_collections/community/docker/plugins/inventory/docker_containers.py
new file mode 100644
index 00000000..c94b0e12
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/inventory/docker_containers.py
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Felix Fontein <felix@fontein.de>
+# For the parts taken from the docker inventory script:
+# Copyright (c) 2016, Paul Durivage <paul.durivage@gmail.com>
+# Copyright (c) 2016, Chris Houseknecht <house@redhat.com>
+# Copyright (c) 2016, James Tanner <jtanner@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+name: docker_containers
+short_description: Ansible dynamic inventory plugin for Docker containers
+version_added: 1.1.0
+author:
+ - Felix Fontein (@felixfontein)
+extends_documentation_fragment:
+ - ansible.builtin.constructed
+ - community.docker.docker.api_documentation
+description:
+ - Reads inventories from the Docker API.
+ - Uses a YAML configuration file that ends with C(docker.[yml|yaml]).
+options:
+ plugin:
+ description:
+ - The name of this plugin, it should always be set to C(community.docker.docker_containers)
+ for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices: [ community.docker.docker_containers ]
+
+ connection_type:
+ description:
+ - Which connection type to use to connect to the containers.
+ - One way to connect to containers is to use SSH (C(ssh)). For this, the options I(default_ip) and
+ I(private_ssh_port) are used. This requires that a SSH daemon is running inside the containers.
+ - Alternatively, C(docker-cli) selects the
+ R(docker connection plugin,ansible_collections.community.docker.docker_connection),
+ and C(docker-api) (default) selects the
+ R(docker_api connection plugin,ansible_collections.community.docker.docker_api_connection).
+ - When C(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin
+ to the connection plugin. This can be controlled with I(configure_docker_daemon).
+ type: str
+ default: docker-api
+ choices:
+ - ssh
+ - docker-cli
+ - docker-api
+
+ configure_docker_daemon:
+ description:
+ - Whether to pass all Docker daemon configuration from the inventory plugin to the connection plugin.
+ - Only used when I(connection_type=docker-api).
+ type: bool
+ default: true
+ version_added: 1.8.0
+
+ verbose_output:
+ description:
+ - Toggle to (not) include all available inspection metadata.
+ - Note that all top-level keys will be transformed to the format C(docker_xxx).
+ For example, C(HostConfig) is converted to C(docker_hostconfig).
+ - If this is C(false), these values can only be used during I(constructed), I(groups), and I(keyed_groups).
+ - The C(docker) inventory script always added these variables, so for compatibility set this to C(true).
+ type: bool
+ default: false
+
+ default_ip:
+ description:
+ - The IP address to assign to ansible_host when the container's SSH port is mapped to interface
+ '0.0.0.0'.
+ - Only used if I(connection_type) is C(ssh).
+ type: str
+ default: 127.0.0.1
+
+ private_ssh_port:
+ description:
+ - The port containers use for SSH.
+ - Only used if I(connection_type) is C(ssh).
+ type: int
+ default: 22
+
+ add_legacy_groups:
+ description:
+ - "Add the same groups as the C(docker) inventory script does. These are the following:"
+ - "C(<container id>): contains the container of this ID."
+ - "C(<container name>): contains the container that has this name."
+ - "C(<container short id>): contains the containers that have this short ID (first 13 letters of ID)."
+ - "C(image_<image name>): contains the containers that have the image C(<image name>)."
+ - "C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>)."
+ - "C(service_<service name>): contains the containers that belong to the service C(<service name>)"
+ - "C(<docker_host>): contains the containers which belong to the Docker daemon I(docker_host).
+ Useful if you run this plugin against multiple Docker daemons."
+ - "C(running): contains all containers that are running."
+ - "C(stopped): contains all containers that are not running."
+ - If this is not set to C(true), you should use keyed groups to add the containers to groups.
+ See the examples for how to do that.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+# Minimal example using local Docker daemon
+plugin: community.docker.docker_containers
+docker_host: unix://var/run/docker.sock
+
+# Minimal example using remote Docker daemon
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2375
+
+# Example using remote Docker daemon with unverified TLS
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2376
+tls: true
+
+# Example using remote Docker daemon with verified TLS and client certificate verification
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2376
+validate_certs: true
+ca_cert: /somewhere/ca.pem
+client_key: /somewhere/key.pem
+client_cert: /somewhere/cert.pem
+
+# Example using constructed features to create groups
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2375
+strict: false
+keyed_groups:
+ # Add containers with primary network foo to a network_foo group
+ - prefix: network
+ key: 'docker_hostconfig.NetworkMode'
+ # Add Linux hosts to an os_linux group
+ - prefix: os
+ key: docker_platform
+
+# Example using SSH connection with an explicit fallback for when port 22 has not been
+# exported: use container name as ansible_ssh_host and 22 as ansible_ssh_port
+plugin: community.docker.docker_containers
+connection_type: ssh
+compose:
+ ansible_ssh_host: ansible_ssh_host | default(docker_name[1:], true)
+ ansible_ssh_port: ansible_ssh_port | default(22, true)
+'''
+
+import re
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_native
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DOCKER_COMMON_ARGS_VARS,
+)
+from ansible_collections.community.docker.plugins.plugin_utils.common_api import (
+ AnsibleDockerClient,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException
+
+MIN_DOCKER_API = None
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using Docker daemon as source. '''
+
+ NAME = 'community.docker.docker_containers'
+
+ def _slugify(self, value):
+ return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
+
+ def _populate(self, client):
+ strict = self.get_option('strict')
+
+ ssh_port = self.get_option('private_ssh_port')
+ default_ip = self.get_option('default_ip')
+ hostname = self.get_option('docker_host')
+ verbose_output = self.get_option('verbose_output')
+ connection_type = self.get_option('connection_type')
+ add_legacy_groups = self.get_option('add_legacy_groups')
+
+ try:
+ params = {
+ 'limit': -1,
+ 'all': 1,
+ 'size': 0,
+ 'trunc_cmd': 0,
+ 'since': None,
+ 'before': None,
+ }
+ containers = client.get_json('/containers/json', params=params)
+ except APIError as exc:
+ raise AnsibleError("Error listing containers: %s" % to_native(exc))
+
+ if add_legacy_groups:
+ self.inventory.add_group('running')
+ self.inventory.add_group('stopped')
+
+ extra_facts = {}
+ if self.get_option('configure_docker_daemon'):
+ for option_name, var_name in DOCKER_COMMON_ARGS_VARS.items():
+ value = self.get_option(option_name)
+ if value is not None:
+ extra_facts[var_name] = value
+
+ for container in containers:
+ id = container.get('Id')
+ short_id = id[:13]
+
+ try:
+ name = container.get('Names', list())[0].lstrip('/')
+ full_name = name
+ except IndexError:
+ name = short_id
+ full_name = id
+
+ self.inventory.add_host(name)
+ facts = dict(
+ docker_name=name,
+ docker_short_id=short_id
+ )
+ full_facts = dict()
+
+ try:
+ inspect = client.get_json('/containers/{0}/json', id)
+ except APIError as exc:
+ raise AnsibleError("Error inspecting container %s - %s" % (name, str(exc)))
+
+ state = inspect.get('State') or dict()
+ config = inspect.get('Config') or dict()
+ labels = config.get('Labels') or dict()
+
+ running = state.get('Running')
+
+ # Add container to groups
+ image_name = config.get('Image')
+ if image_name and add_legacy_groups:
+ self.inventory.add_group('image_{0}'.format(image_name))
+ self.inventory.add_host(name, group='image_{0}'.format(image_name))
+
+ stack_name = labels.get('com.docker.stack.namespace')
+ if stack_name:
+ full_facts['docker_stack'] = stack_name
+ if add_legacy_groups:
+ self.inventory.add_group('stack_{0}'.format(stack_name))
+ self.inventory.add_host(name, group='stack_{0}'.format(stack_name))
+
+ service_name = labels.get('com.docker.swarm.service.name')
+ if service_name:
+ full_facts['docker_service'] = service_name
+ if add_legacy_groups:
+ self.inventory.add_group('service_{0}'.format(service_name))
+ self.inventory.add_host(name, group='service_{0}'.format(service_name))
+
+ if connection_type == 'ssh':
+ # Figure out ssh IP and Port
+ try:
+ # Lookup the public facing port Nat'ed to ssh port.
+ network_settings = inspect.get('NetworkSettings') or {}
+ port_settings = network_settings.get('Ports') or {}
+ port = port_settings.get('%d/tcp' % (ssh_port, ))[0]
+ except (IndexError, AttributeError, TypeError):
+ port = dict()
+
+ try:
+ ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
+ except KeyError:
+ ip = ''
+
+ facts.update(dict(
+ ansible_ssh_host=ip,
+ ansible_ssh_port=port.get('HostPort', 0),
+ ))
+ elif connection_type == 'docker-cli':
+ facts.update(dict(
+ ansible_host=full_name,
+ ansible_connection='community.docker.docker',
+ ))
+ elif connection_type == 'docker-api':
+ facts.update(dict(
+ ansible_host=full_name,
+ ansible_connection='community.docker.docker_api',
+ ))
+ facts.update(extra_facts)
+
+ full_facts.update(facts)
+ for key, value in inspect.items():
+ fact_key = self._slugify(key)
+ full_facts[fact_key] = value
+
+ if verbose_output:
+ facts.update(full_facts)
+
+ for key, value in facts.items():
+ self.inventory.set_variable(name, key, value)
+
+ # Use constructed if applicable
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), full_facts, name, strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), full_facts, name, strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), full_facts, name, strict=strict)
+
+ # We need to do this last since we also add a group called `name`.
+ # When we do this before a set_variable() call, the variables are assigned
+ # to the group, and not to the host.
+ if add_legacy_groups:
+ self.inventory.add_group(id)
+ self.inventory.add_host(name, group=id)
+ self.inventory.add_group(name)
+ self.inventory.add_host(name, group=name)
+ self.inventory.add_group(short_id)
+ self.inventory.add_host(name, group=short_id)
+ self.inventory.add_group(hostname)
+ self.inventory.add_host(name, group=hostname)
+
+ if running is True:
+ self.inventory.add_host(name, group='running')
+ else:
+ self.inventory.add_host(name, group='stopped')
+
+ def verify_file(self, path):
+ """Return the possibility of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker.yaml', 'docker.yml')))
+
+ def _create_client(self):
+ return AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ client = self._create_client()
+ try:
+ self._populate(client)
+ except DockerException as e:
+ raise AnsibleError(
+ 'An unexpected Docker error occurred: {0}'.format(e)
+ )
+ except RequestException as e:
+ raise AnsibleError(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(e)
+ )
diff --git a/ansible_collections/community/docker/plugins/inventory/docker_machine.py b/ansible_collections/community/docker/plugins/inventory/docker_machine.py
new file mode 100644
index 00000000..69d94610
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/inventory/docker_machine.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_machine
+ author: Ximon Eighteen (@ximon18)
+ short_description: Docker Machine inventory source
+ requirements:
+ - L(Docker Machine,https://docs.docker.com/machine/)
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Get inventory hosts from Docker Machine.
+ - Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
+ - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key_file).
+ - The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
+
+ options:
+ plugin:
+ description: token that ensures this is a source file for the C(docker_machine) plugin.
+ required: true
+ choices: ['docker_machine', 'community.docker.docker_machine']
+ daemon_env:
+ description:
+ - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
+ - With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
+ A warning will be issued for any skipped host if the choice is C(require).
+ - With C(optional) and C(optional-silently), fetch them and not skip hosts for which they cannot be fetched.
+ A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
+ - With C(skip), do not attempt to fetch the docker daemon connection environment variables.
+ - If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
+ type: str
+ choices:
+ - require
+ - require-silently
+ - optional
+ - optional-silently
+ - skip
+ default: require
+ running_required:
+ description:
+ - When C(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
+ type: bool
+ default: true
+ verbose_output:
+ description:
+ - When C(true), include all available nodes metadata (for example C(Image), C(Region), C(Size)) as a JSON object
+ named C(docker_machine_node_attributes).
+ type: bool
+ default: true
+'''
+
+EXAMPLES = '''
+# Minimal example
+plugin: community.docker.docker_machine
+
+# Example using constructed features to create a group per Docker Machine driver
+# (https://docs.docker.com/machine/drivers/), for example:
+# $ docker-machine create --driver digitalocean ... mymachine
+# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
+# {
+# ...
+# "digitalocean": {
+# "hosts": [
+# "mymachine"
+# ]
+# ...
+# }
+strict: false
+keyed_groups:
+ - separator: ''
+ key: docker_machine_node_attributes.DriverName
+
+# Example grouping hosts by Docker Machine tag
+strict: false
+keyed_groups:
+ - prefix: tag
+ key: 'dm_tags'
+
+# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
+compose:
+ ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.common.process import get_bin_path
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.display import Display
+
+import json
+import re
+import subprocess
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using Docker machine as source. '''
+
+ NAME = 'community.docker.docker_machine'
+
+ DOCKER_MACHINE_PATH = None
+
+ def _run_command(self, args):
+ if not self.DOCKER_MACHINE_PATH:
+ try:
+ self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
+ except ValueError as e:
+ raise AnsibleError(to_native(e))
+
+ command = [self.DOCKER_MACHINE_PATH]
+ command.extend(args)
+ display.debug('Executing command {0}'.format(command))
+ try:
+ result = subprocess.check_output(command)
+ except subprocess.CalledProcessError as e:
+ display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
+ raise e
+
+ return to_text(result).strip()
+
+ def _get_docker_daemon_variables(self, machine_name):
+ '''
+ Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
+ the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
+ '''
+ try:
+ env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
+ except subprocess.CalledProcessError:
+ # This can happen when the machine is created but provisioning is incomplete
+ return []
+
+ # example output of docker-machine env --shell=sh:
+ # export DOCKER_TLS_VERIFY="1"
+ # export DOCKER_HOST="tcp://134.209.204.160:2376"
+ # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
+ # export DOCKER_MACHINE_NAME="routinator"
+ # # Run this command to configure your shell:
+ # # eval $(docker-machine env --shell=bash routinator)
+
+ # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
+ # with the same name and value but with a dm_ name prefix.
+ vars = []
+ for line in env_lines:
+ match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
+ if match:
+ env_var_name = match.group(1)
+ env_var_value = match.group(2)
+ vars.append((env_var_name, env_var_value))
+
+ return vars
+
+ def _get_machine_names(self):
+ # Filter out machines that are not in the Running state, as we probably can't perform any useful actions
+ # on them.
+ ls_command = ['ls', '-q']
+ if self.get_option('running_required'):
+ ls_command.extend(['--filter', 'state=Running'])
+
+ try:
+ ls_lines = self._run_command(ls_command)
+ except subprocess.CalledProcessError:
+ return []
+
+ return ls_lines.splitlines()
+
+ def _inspect_docker_machine_host(self, node):
+ try:
+ inspect_lines = self._run_command(['inspect', self.node])
+ except subprocess.CalledProcessError:
+ return None
+
+ return json.loads(inspect_lines)
+
+ def _ip_addr_docker_machine_host(self, node):
+ try:
+ ip_addr = self._run_command(['ip', self.node])
+ except subprocess.CalledProcessError:
+ return None
+
+ return ip_addr
+
+ def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
+ if not env_var_tuples:
+ warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
+ if daemon_env in ('require', 'require-silently'):
+ if daemon_env == 'require':
+ display.warning('{0}: host will be skipped'.format(warning_prefix))
+ return True
+ else: # 'optional', 'optional-silently'
+ if daemon_env == 'optional':
+ display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
+ return False
+
+ def _populate(self):
+ daemon_env = self.get_option('daemon_env')
+ try:
+ for self.node in self._get_machine_names():
+ self.node_attrs = self._inspect_docker_machine_host(self.node)
+ if not self.node_attrs:
+ continue
+
+ machine_name = self.node_attrs['Driver']['MachineName']
+
+ # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
+ # that could be used to set environment variables to influence a local Docker client:
+ if daemon_env == 'skip':
+ env_var_tuples = []
+ else:
+ env_var_tuples = self._get_docker_daemon_variables(machine_name)
+ if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
+ continue
+
+ # add an entry in the inventory for this host
+ self.inventory.add_host(machine_name)
+
+ # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
+ # this works around an issue seen with Google Compute Platform where the IP address was not available
+ # via the 'inspect' subcommand but was via the 'ip' subcommand.
+ if self.node_attrs['Driver']['IPAddress']:
+ ip_addr = self.node_attrs['Driver']['IPAddress']
+ else:
+ ip_addr = self._ip_addr_docker_machine_host(self.node)
+
+ # set standard Ansible remote host connection settings to details captured from `docker-machine`
+ # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
+ self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
+ self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
+ self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
+ self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])
+
+ # set variables based on Docker Machine tags
+ tags = self.node_attrs['Driver'].get('Tags') or ''
+ self.inventory.set_variable(machine_name, 'dm_tags', tags)
+
+ # set variables based on Docker Machine env variables
+ for kv in env_var_tuples:
+ self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])
+
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)
+
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)
+
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)
+
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)
+
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
+ to_native(e), orig_exc=e)
+
+ def verify_file(self, path):
+ """Return the possibility of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker_machine.yaml', 'docker_machine.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/ansible_collections/community/docker/plugins/inventory/docker_swarm.py b/ansible_collections/community/docker/plugins/inventory/docker_swarm.py
new file mode 100644
index 00000000..9ae2fb9a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/inventory/docker_swarm.py
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_swarm
+ author:
+ - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
+ short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
+ requirements:
+ - python >= 2.7
+ - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Reads inventories from the Docker swarm API.
+ - Uses a YAML configuration file docker_swarm.[yml|yaml].
+ - "The plugin returns the following groups of swarm nodes: I(all) - all hosts; I(worker) - all worker nodes;
+ I(manager) - all manager nodes; I(leader) - the swarm leader node;
+ I(nonleaders) - all nodes except the swarm leader."
+ options:
+ plugin:
+ description: The name of this plugin, it should always be set to C(community.docker.docker_swarm)
+ for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices: [ docker_swarm, community.docker.docker_swarm ]
+ docker_host:
+ description:
+ - Socket of a Docker swarm manager node (C(tcp), C(unix)).
+ - "Use C(unix://var/run/docker.sock) to connect via local socket."
+ type: str
+ required: true
+ aliases: [ docker_url ]
+ verbose_output:
+ description: Toggle to (not) include all available nodes metadata (for example C(Platform), C(Architecture), C(OS),
+ C(EngineVersion))
+ type: bool
+ default: true
+ tls:
+ description: Connect using TLS without verifying the authenticity of the Docker host server.
+ type: bool
+ default: false
+ validate_certs:
+ description: Toggle if connecting using TLS with or without verifying the authenticity of the Docker
+ host server.
+ type: bool
+ default: false
+ aliases: [ tls_verify ]
+ client_key:
+ description: Path to the client's TLS key file.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ca_cert:
+ description: Use a CA certificate when performing server verification by providing the path to a CA
+ certificate file.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description: Path to the client's TLS certificate file.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ tls_hostname:
+ description: When verifying the authenticity of the Docker host server, provide the expected name of
+ the server.
+ type: str
+ ssl_version:
+ description: Provide a valid SSL version number. Default value determined by ssl.py module.
+ type: str
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by Docker SDK for Python.
+ type: str
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
+ will be used instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ aliases: [ time_out ]
+ use_ssh_client:
+ description:
+ - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
+ - Requires Docker SDK for Python 4.4.0 or newer.
+ type: bool
+ default: false
+ version_added: 1.5.0
+ include_host_uri:
+ description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
+ swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional
+ modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
+ The port always defaults to C(2376).
+ type: bool
+ default: false
+ include_host_uri_port:
+ description: Override the detected port number included in I(ansible_host_uri)
+ type: int
+'''
+
+EXAMPLES = '''
+# Minimal example using local docker
+plugin: community.docker.docker_swarm
+docker_host: unix://var/run/docker.sock
+
+# Minimal example using remote docker
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2375
+
+# Example using remote docker with unverified TLS
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2376
+tls: true
+
+# Example using remote docker with verified TLS and client certificate verification
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2376
+validate_certs: true
+ca_cert: /somewhere/ca.pem
+client_key: /somewhere/key.pem
+client_cert: /somewhere/cert.pem
+
+# Example using constructed features to create groups and set ansible_host
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2375
+strict: False
+keyed_groups:
+ # add for example x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'Description.Platform.Architecture'
+ # add for example linux hosts to an os_linux group
+ - prefix: os
+ key: 'Description.Platform.OS'
+ # create a group per node label
+ # for example a node labeled w/ "production" ends up in group "label_production"
+ # hint: labels containing special characters will be converted to safe names
+ - key: 'Spec.Labels'
+ prefix: label
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.docker.plugins.module_utils.common import get_connect_params
+from ansible_collections.community.docker.plugins.module_utils.util import update_tls_hostname
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.parsing.utils.addresses import parse_address
+
+try:
+ import docker
+ HAS_DOCKER = True
+except ImportError:
+ HAS_DOCKER = False
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using Docker swarm as source. '''
+
+ NAME = 'community.docker.docker_swarm'
+
+ def _fail(self, msg):
+ raise AnsibleError(msg)
+
+ def _populate(self):
+ raw_params = dict(
+ docker_host=self.get_option('docker_host'),
+ tls=self.get_option('tls'),
+ tls_verify=self.get_option('validate_certs'),
+ key_path=self.get_option('client_key'),
+ cacert_path=self.get_option('ca_cert'),
+ cert_path=self.get_option('client_cert'),
+ tls_hostname=self.get_option('tls_hostname'),
+ api_version=self.get_option('api_version'),
+ timeout=self.get_option('timeout'),
+ ssl_version=self.get_option('ssl_version'),
+ use_ssh_client=self.get_option('use_ssh_client'),
+ debug=None,
+ )
+ update_tls_hostname(raw_params)
+ connect_params = get_connect_params(raw_params, fail_function=self._fail)
+ self.client = docker.DockerClient(**connect_params)
+ self.inventory.add_group('all')
+ self.inventory.add_group('manager')
+ self.inventory.add_group('worker')
+ self.inventory.add_group('leader')
+ self.inventory.add_group('nonleaders')
+
+ if self.get_option('include_host_uri'):
+ if self.get_option('include_host_uri_port'):
+ host_uri_port = str(self.get_option('include_host_uri_port'))
+ elif self.get_option('tls') or self.get_option('validate_certs'):
+ host_uri_port = '2376'
+ else:
+ host_uri_port = '2375'
+
+ try:
+ self.nodes = self.client.nodes.list()
+ for self.node in self.nodes:
+ self.node_attrs = self.client.nodes.get(self.node.id).attrs
+ self.inventory.add_host(self.node_attrs['ID'])
+ self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
+ self.node_attrs['Status']['Addr'])
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
+ if 'ManagerStatus' in self.node_attrs:
+ if self.node_attrs['ManagerStatus'].get('Leader'):
+ # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
+ # Check moby/moby#35437 for details
+ swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
+ self.node_attrs['Status']['Addr']
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + swarm_leader_ip + ':' + host_uri_port)
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
+ self.inventory.add_host(self.node_attrs['ID'], group='leader')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
+ to_native(e))
+
+ def verify_file(self, path):
+ """Return the possibility of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_DOCKER:
+ raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
+ 'https://github.com/docker/docker-py.')
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/_import_helper.py b/ansible_collections/community/docker/plugins/module_utils/_api/_import_helper.py
new file mode 100644
index 00000000..f3ea504c
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/_import_helper.py
@@ -0,0 +1,97 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils.six import PY2
+
+
+REQUESTS_IMPORT_ERROR = None
+URLLIB3_IMPORT_ERROR = None
+BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR = None
+
+
+try:
+ from requests import Session # noqa: F401, pylint: disable=unused-import
+ from requests.adapters import HTTPAdapter # noqa: F401, pylint: disable=unused-import
+ from requests.exceptions import HTTPError, InvalidSchema # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ REQUESTS_IMPORT_ERROR = traceback.format_exc()
+
+ class Session(object):
+ __attrs__ = []
+
+ class HTTPAdapter(object):
+ __attrs__ = []
+
+ class HTTPError(Exception):
+ pass
+
+ class InvalidSchema(Exception):
+ pass
+
+
+try:
+ from requests.packages import urllib3
+except ImportError:
+ try:
+ import urllib3
+ except ImportError:
+ URLLIB3_IMPORT_ERROR = traceback.format_exc()
+
+ class _HTTPConnectionPool(object):
+ pass
+
+ class FakeURLLIB3(object):
+ def __init__(self):
+ self._collections = self
+ self.poolmanager = self
+ self.connection = self
+ self.connectionpool = self
+
+ self.RecentlyUsedContainer = object()
+ self.PoolManager = object()
+ self.match_hostname = object()
+ self.HTTPConnectionPool = _HTTPConnectionPool
+
+ urllib3 = FakeURLLIB3()
+
+
+# Monkey-patching match_hostname with a version that supports
+# IP-address checking. Not necessary for Python 3.5 and above
+if PY2:
+ try:
+ from backports.ssl_match_hostname import match_hostname
+ urllib3.connection.match_hostname = match_hostname
+ except ImportError:
+ BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR = traceback.format_exc()
+
+
+def fail_on_missing_imports():
+ if REQUESTS_IMPORT_ERROR is not None:
+ from .errors import MissingRequirementException
+
+ raise MissingRequirementException(
+ 'You have to install requests',
+ 'requests', REQUESTS_IMPORT_ERROR)
+ if URLLIB3_IMPORT_ERROR is not None:
+ from .errors import MissingRequirementException
+
+ raise MissingRequirementException(
+ 'You have to install urllib3',
+ 'urllib3', URLLIB3_IMPORT_ERROR)
+ if BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR is not None:
+ from .errors import MissingRequirementException
+
+ raise MissingRequirementException(
+ 'You have to install backports.ssl-match-hostname',
+ 'backports.ssl-match-hostname', BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR)
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/api/client.py b/ansible_collections/community/docker/plugins/module_utils/_api/api/client.py
new file mode 100644
index 00000000..d9ec5870
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/api/client.py
@@ -0,0 +1,606 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import logging
+import struct
+from functools import partial
+
+from ansible.module_utils.six import PY3, binary_type, iteritems, string_types, raise_from
+from ansible.module_utils.six.moves.urllib.parse import quote
+
+from .. import auth
+from .._import_helper import fail_on_missing_imports
+from .._import_helper import HTTPError as _HTTPError
+from .._import_helper import InvalidSchema as _InvalidSchema
+from .._import_helper import Session as _Session
+from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH,
+ DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS,
+ DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
+ MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES,
+ DEFAULT_DATA_CHUNK_SIZE)
+from ..errors import (DockerException, InvalidVersion, TLSParameterError, MissingRequirementException,
+ create_api_error_from_http_exception)
+from ..tls import TLSConfig
+from ..transport.npipeconn import NpipeHTTPAdapter
+from ..transport.npipesocket import PYWIN32_IMPORT_ERROR
+from ..transport.unixconn import UnixHTTPAdapter
+from ..transport.sshconn import SSHHTTPAdapter, PARAMIKO_IMPORT_ERROR
+from ..transport.ssladapter import SSLHTTPAdapter
+from ..utils import config, utils, json_stream
+from ..utils.decorators import check_resource, update_headers
+from ..utils.proxy import ProxyConfig
+from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
+
+from .daemon import DaemonApiMixin
+
+
+log = logging.getLogger(__name__)
+
+
+class APIClient(
+ _Session,
+ DaemonApiMixin):
+ """
+ A low-level client for the Docker Engine API.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
+ >>> client.version()
+ {u'ApiVersion': u'1.33',
+ u'Arch': u'amd64',
+ u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
+ u'GitCommit': u'f4ffd2511c',
+ u'GoVersion': u'go1.9.2',
+ u'KernelVersion': u'4.14.3-1-ARCH',
+ u'MinAPIVersion': u'1.12',
+ u'Os': u'linux',
+ u'Version': u'17.10.0-ce'}
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.35``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ credstore_env (dict): Override environment variables when calling the
+ credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is made
+ via shelling out to the ssh client. Ensure the ssh client is
+ installed and configured on the host.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
+ """
+
+ __attrs__ = _Session.__attrs__ + ['_auth_configs',
+ '_general_configs',
+ '_version',
+ 'base_url',
+ 'timeout']
+
+ def __init__(self, base_url=None, version=None,
+ timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
+ user_agent=DEFAULT_USER_AGENT, num_pools=None,
+ credstore_env=None, use_ssh_client=False,
+ max_pool_size=DEFAULT_MAX_POOL_SIZE):
+ super(APIClient, self).__init__()
+
+ fail_on_missing_imports()
+
+ if tls and not base_url:
+ raise TLSParameterError(
+ 'If using TLS, the base_url argument must be provided.'
+ )
+
+ self.base_url = base_url
+ self.timeout = timeout
+ self.headers['User-Agent'] = user_agent
+
+ self._general_configs = config.load_general_config()
+
+ proxy_config = self._general_configs.get('proxies', {})
+ try:
+ proxies = proxy_config[base_url]
+ except KeyError:
+ proxies = proxy_config.get('default', {})
+
+ self._proxy_configs = ProxyConfig.from_dict(proxies)
+
+ self._auth_configs = auth.load_config(
+ config_dict=self._general_configs, credstore_env=credstore_env,
+ )
+ self.credstore_env = credstore_env
+
+ base_url = utils.parse_host(
+ base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
+ )
+ # SSH has a different default for num_pools to all other adapters
+ num_pools = num_pools or DEFAULT_NUM_POOLS_SSH if \
+ base_url.startswith('ssh://') else DEFAULT_NUM_POOLS
+
+ if base_url.startswith('http+unix://'):
+ self._custom_adapter = UnixHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ # host part of URL should be unused, but is resolved by requests
+ # module in proxy_bypass_macosx_sysconf()
+ self.base_url = 'http+docker://localhost'
+ elif base_url.startswith('npipe://'):
+ if not IS_WINDOWS_PLATFORM:
+ raise DockerException(
+ 'The npipe:// protocol is only supported on Windows'
+ )
+ if PYWIN32_IMPORT_ERROR is not None:
+ raise MissingRequirementException(
+ 'Install pypiwin32 package to enable npipe:// support',
+ 'pywin32',
+ PYWIN32_IMPORT_ERROR)
+ self._custom_adapter = NpipeHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self.base_url = 'http+docker://localnpipe'
+ elif base_url.startswith('ssh://'):
+ if PARAMIKO_IMPORT_ERROR is not None and not use_ssh_client:
+ raise MissingRequirementException(
+ 'Install paramiko package to enable ssh:// support',
+ 'paramiko',
+ PARAMIKO_IMPORT_ERROR)
+ self._custom_adapter = SSHHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size, shell_out=use_ssh_client
+ )
+ self.mount('http+docker://ssh', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ self.base_url = 'http+docker://ssh'
+ else:
+ # Use SSLAdapter for the ability to specify SSL version
+ if isinstance(tls, TLSConfig):
+ tls.configure_client(self)
+ elif tls:
+ self._custom_adapter = SSLHTTPAdapter(
+ pool_connections=num_pools)
+ self.mount('https://', self._custom_adapter)
+ self.base_url = base_url
+
+ # version detection needs to be after unix adapter mounting
+ if version is None or (isinstance(version, string_types) and version.lower() == 'auto'):
+ self._version = self._retrieve_server_version()
+ else:
+ self._version = version
+ if not isinstance(self._version, string_types):
+ raise DockerException(
+ 'Version parameter must be a string or None. Found {0}'.format(
+ type(version).__name__
+ )
+ )
+ if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
+ raise InvalidVersion(
+ 'API versions below {0} are no longer supported by this '
+ 'library.'.format(MINIMUM_DOCKER_API_VERSION)
+ )
+
+    def _retrieve_server_version(self):
+        """Ask the daemon for its API version (used when version='auto').
+
+        Returns:
+            (str): The daemon's reported ``ApiVersion``.
+
+        Raises:
+            DockerException: If the daemon's response lacks an ``ApiVersion``
+                key, or if contacting the daemon fails for any other reason.
+        """
+        try:
+            # api_version=False: query the unversioned /version endpoint,
+            # since self._version is not known yet at this point.
+            return self.version(api_version=False)["ApiVersion"]
+        except KeyError:
+            raise DockerException(
+                'Invalid response from docker daemon: key "ApiVersion"'
+                ' is missing.'
+            )
+        except Exception as e:
+            raise DockerException(
+                'Error while fetching server API version: {0}'.format(e)
+            )
+
+ def _set_request_timeout(self, kwargs):
+ """Prepare the kwargs for an HTTP request by inserting the timeout
+ parameter, if not already present."""
+ kwargs.setdefault('timeout', self.timeout)
+ return kwargs
+
+ @update_headers
+ def _post(self, url, **kwargs):
+ return self.post(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _get(self, url, **kwargs):
+ return self.get(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _head(self, url, **kwargs):
+ return self.head(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _put(self, url, **kwargs):
+ return self.put(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _delete(self, url, **kwargs):
+ return self.delete(url, **self._set_request_timeout(kwargs))
+
+ def _url(self, pathfmt, *args, **kwargs):
+ for arg in args:
+ if not isinstance(arg, string_types):
+ raise ValueError(
+ 'Expected a string but found {0} ({1}) '
+ 'instead'.format(arg, type(arg))
+ )
+
+ quote_f = partial(quote, safe="/:")
+ args = map(quote_f, args)
+
+ if kwargs.get('versioned_api', True):
+ return '{0}/v{1}{2}'.format(
+ self.base_url, self._version, pathfmt.format(*args)
+ )
+ else:
+ return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
+
+ def _raise_for_status(self, response):
+ """Raises stored :class:`APIError`, if one occurred."""
+ try:
+ response.raise_for_status()
+ except _HTTPError as e:
+ raise_from(create_api_error_from_http_exception(e), e)
+
+    def _result(self, response, json=False, binary=False):
+        """Raise on HTTP errors, then return the response payload.
+
+        Args:
+            response: A ``requests`` response object.
+            json (bool): Return the JSON-decoded body.
+            binary (bool): Return the raw body bytes (``response.content``).
+
+        ``json`` and ``binary`` are mutually exclusive; with neither set, the
+        decoded text body (``response.text``) is returned.
+        """
+        if json and binary:
+            raise AssertionError('json and binary must not be both True')
+        self._raise_for_status(response)
+
+        if json:
+            return response.json()
+        if binary:
+            return response.content
+        return response.text
+
+ def _post_json(self, url, data, **kwargs):
+ # Go <1.1 can't unserialize null to a string
+ # so we do this disgusting thing here.
+ data2 = {}
+ if data is not None and isinstance(data, dict):
+ for k, v in iteritems(data):
+ if v is not None:
+ data2[k] = v
+ elif data is not None:
+ data2 = data
+
+ if 'headers' not in kwargs:
+ kwargs['headers'] = {}
+ kwargs['headers']['Content-Type'] = 'application/json'
+ return self._post(url, data=json.dumps(data2), **kwargs)
+
+ def _attach_params(self, override=None):
+ return override or {
+ 'stdout': 1,
+ 'stderr': 1,
+ 'stream': 1
+ }
+
+ def _get_raw_response_socket(self, response):
+ self._raise_for_status(response)
+ if self.base_url == "http+docker://localnpipe":
+ sock = response.raw._fp.fp.raw.sock
+ elif self.base_url.startswith('http+docker://ssh'):
+ sock = response.raw._fp.fp.channel
+ elif PY3:
+ sock = response.raw._fp.fp.raw
+ if self.base_url.startswith("https://"):
+ sock = sock._sock
+ else:
+ sock = response.raw._fp.fp._sock
+ try:
+ # Keep a reference to the response to stop it being garbage
+ # collected. If the response is garbage collected, it will
+ # close TLS sockets.
+ sock._response = response
+ except AttributeError:
+ # UNIX sockets can't have attributes set on them, but that's
+ # fine because we won't be doing TLS over them
+ pass
+
+ return sock
+
+ def _stream_helper(self, response, decode=False):
+ """Generator for data coming from a chunked-encoded HTTP response."""
+
+ if response.raw._fp.chunked:
+ if decode:
+ for chunk in json_stream.json_stream(self._stream_helper(response, False)):
+ yield chunk
+ else:
+ reader = response.raw
+ while not reader.closed:
+ # this read call will block until we get a chunk
+ data = reader.read(1)
+ if not data:
+ break
+ if reader._fp.chunk_left:
+ data += reader.read(reader._fp.chunk_left)
+ yield data
+ else:
+ # Response isn't chunked, meaning we probably
+ # encountered an error immediately
+ yield self._result(response, json=decode)
+
+ def _multiplexed_buffer_helper(self, response):
+ """A generator of multiplexed data blocks read from a buffered
+ response."""
+ buf = self._result(response, binary=True)
+ buf_length = len(buf)
+ walker = 0
+ while True:
+ if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
+ break
+ header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
+ dummy, length = struct.unpack_from('>BxxxL', header)
+ start = walker + STREAM_HEADER_SIZE_BYTES
+ end = start + length
+ walker = end
+ yield buf[start:end]
+
+ def _multiplexed_response_stream_helper(self, response):
+ """A generator of multiplexed data blocks coming from a response
+ stream."""
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ while True:
+ header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
+ if not header:
+ break
+ dummy, length = struct.unpack('>BxxxL', header)
+ if not length:
+ continue
+ data = response.raw.read(length)
+ if not data:
+ break
+ yield data
+
+ def _stream_raw_result(self, response, chunk_size=1, decode=True):
+ ''' Stream result for TTY-enabled container and raw binary data'''
+ self._raise_for_status(response)
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ for out in response.iter_content(chunk_size, decode):
+ yield out
+
+ def _read_from_socket(self, response, stream, tty=True, demux=False):
+ """Consume all data from the socket, close the response and return the
+ data. If stream=True, then a generator is returned instead and the
+ caller is responsible for closing the response.
+ """
+ socket = self._get_raw_response_socket(response)
+
+ gen = frames_iter(socket, tty)
+
+ if demux:
+ # The generator will output tuples (stdout, stderr)
+ gen = (demux_adaptor(*frame) for frame in gen)
+ else:
+ # The generator will output strings
+ gen = (data for (dummy, data) in gen)
+
+ if stream:
+ return gen
+ else:
+ try:
+ # Wait for all the frames, concatenate them, and return the result
+ return consume_socket_output(gen, demux=demux)
+ finally:
+ response.close()
+
+ def _disable_socket_timeout(self, socket):
+ """ Depending on the combination of python version and whether we're
+ connecting over http or https, we might need to access _sock, which
+ may or may not exist; or we may need to just settimeout on socket
+ itself, which also may or may not have settimeout on it. To avoid
+ missing the correct one, we try both.
+
+ We also do not want to set the timeout if it is already disabled, as
+ you run the risk of changing a socket that was non-blocking to
+ blocking, for example when using gevent.
+ """
+ sockets = [socket, getattr(socket, '_sock', None)]
+
+ for s in sockets:
+ if not hasattr(s, 'settimeout'):
+ continue
+
+ timeout = -1
+
+ if hasattr(s, 'gettimeout'):
+ timeout = s.gettimeout()
+
+ # Don't change the timeout if it is already disabled.
+ if timeout is None or timeout == 0.0:
+ continue
+
+ s.settimeout(None)
+
+ @check_resource('container')
+ def _check_is_tty(self, container):
+ cont = self.inspect_container(container)
+ return cont['Config']['Tty']
+
+ def _get_result(self, container, stream, res):
+ return self._get_result_tty(stream, res, self._check_is_tty(container))
+
+ def _get_result_tty(self, stream, res, is_tty):
+ # We should also use raw streaming (without keep-alives)
+ # if we're dealing with a tty-enabled container.
+ if is_tty:
+ return self._stream_raw_result(res) if stream else \
+ self._result(res, binary=True)
+
+ self._raise_for_status(res)
+ sep = binary_type()
+ if stream:
+ return self._multiplexed_response_stream_helper(res)
+ else:
+ return sep.join(
+ list(self._multiplexed_buffer_helper(res))
+ )
+
+ def _unmount(self, *args):
+ for proto in args:
+ self.adapters.pop(proto)
+
+ def get_adapter(self, url):
+ try:
+ return super(APIClient, self).get_adapter(url)
+ except _InvalidSchema as e:
+ if self._custom_adapter:
+ return self._custom_adapter
+ else:
+ raise e
+
+ @property
+ def api_version(self):
+ return self._version
+
+ def reload_config(self, dockercfg_path=None):
+ """
+ Force a reload of the auth configuration
+
+ Args:
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+ otherwise ``$HOME/.dockercfg``)
+
+ Returns:
+ None
+ """
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
+
+ def _set_auth_headers(self, headers):
+ log.debug('Looking for auth config')
+
+ # If we don't have any auth data so far, try reloading the config
+ # file one more time in case anything showed up in there.
+ if not self._auth_configs or self._auth_configs.is_empty:
+ log.debug("No auth config in memory - loading from filesystem")
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
+
+ # Send the full auth configuration (if any exists), since the build
+ # could use any (or all) of the registries.
+ if self._auth_configs:
+ auth_data = self._auth_configs.get_all_credentials()
+
+ # See https://github.com/docker/docker-py/issues/1683
+ if (auth.INDEX_URL not in auth_data and
+ auth.INDEX_NAME in auth_data):
+ auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
+
+ log.debug(
+ 'Sending auth config (%s)',
+ ', '.join(repr(k) for k in auth_data.keys())
+ )
+
+ if auth_data:
+ headers['X-Registry-Config'] = auth.encode_header(
+ auth_data
+ )
+ else:
+ log.debug('No auth config found')
+
+ def get_binary(self, pathfmt, *args, **kwargs):
+ return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs), binary=True)
+
+ def get_json(self, pathfmt, *args, **kwargs):
+ return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
+
+ def get_text(self, pathfmt, *args, **kwargs):
+ return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs))
+
+ def get_raw_stream(self, pathfmt, *args, **kwargs):
+ chunk_size = kwargs.pop('chunk_size', DEFAULT_DATA_CHUNK_SIZE)
+ res = self._get(self._url(pathfmt, *args, versioned_api=True), stream=True, **kwargs)
+ self._raise_for_status(res)
+ return self._stream_raw_result(res, chunk_size, False)
+
+ def delete_call(self, pathfmt, *args, **kwargs):
+ self._raise_for_status(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs))
+
+ def delete_json(self, pathfmt, *args, **kwargs):
+ return self._result(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
+
+ def post_call(self, pathfmt, *args, **kwargs):
+ self._raise_for_status(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs))
+
+ def post_json(self, pathfmt, *args, **kwargs):
+ data = kwargs.pop('data', None)
+ self._raise_for_status(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs))
+
+ def post_json_to_binary(self, pathfmt, *args, **kwargs):
+ data = kwargs.pop('data', None)
+ return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs), binary=True)
+
+ def post_json_to_json(self, pathfmt, *args, **kwargs):
+ data = kwargs.pop('data', None)
+ return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs), json=True)
+
+ def post_json_to_text(self, pathfmt, *args, **kwargs):
+ data = kwargs.pop('data', None)
+
+ def post_json_to_stream_socket(self, pathfmt, *args, **kwargs):
+ data = kwargs.pop('data', None)
+ headers = (kwargs.pop('headers', None) or {}).copy()
+ headers.update({
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp',
+ })
+ return self._get_raw_response_socket(
+ self._post_json(self._url(pathfmt, *args, versioned_api=True), data, headers=headers, stream=True, **kwargs))
+
+ def post_json_to_stream(self, pathfmt, *args, **kwargs):
+ data = kwargs.pop('data', None)
+ headers = (kwargs.pop('headers', None) or {}).copy()
+ headers.update({
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp',
+ })
+ stream = kwargs.pop('stream', False)
+ demux = kwargs.pop('demux', False)
+ tty = kwargs.pop('tty', False)
+ return self._read_from_socket(
+ self._post_json(self._url(pathfmt, *args, versioned_api=True), data, headers=headers, stream=True, **kwargs),
+ stream,
+ tty=tty,
+ demux=demux
+ )
+
+ def post_to_json(self, pathfmt, *args, **kwargs):
+ return self._result(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/api/daemon.py b/ansible_collections/community/docker/plugins/module_utils/_api/api/daemon.py
new file mode 100644
index 00000000..9e7adbf3
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/api/daemon.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+from datetime import datetime
+
+from .. import auth
+from ..utils.utils import datetime_to_timestamp, convert_filters
+from ..utils.decorators import minimum_version
+from ..types.daemon import CancellableStream
+
+
+class DaemonApiMixin(object):
+ @minimum_version('1.25')
+ def df(self):
+ """
+ Get data usage information.
+
+ Returns:
+ (dict): A dictionary representing different resource categories
+ and their respective data usage.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/system/df')
+ return self._result(self._get(url), True)
+
+ def events(self, since=None, until=None, filters=None, decode=None):
+ """
+ Get real-time events from the server. Similar to the ``docker events``
+ command.
+
+ Args:
+ since (UTC datetime or int): Get events from this point
+ until (UTC datetime or int): Get events until this point
+ filters (dict): Filter the events by event time, container or image
+ decode (bool): If set to true, stream will be decoded into dicts on
+ the fly. False by default.
+
+ Returns:
+ A :py:class:`docker.types.daemon.CancellableStream` generator
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+ >>> for event in client.events(decode=True)
+ ... print(event)
+ {u'from': u'image/with:tag',
+ u'id': u'container-id',
+ u'status': u'start',
+ u'time': 1423339459}
+ ...
+
+ or
+
+ >>> events = client.events()
+ >>> for event in events:
+ ... print(event)
+ >>> # and cancel from another thread
+ >>> events.close()
+ """
+
+ if isinstance(since, datetime):
+ since = datetime_to_timestamp(since)
+
+ if isinstance(until, datetime):
+ until = datetime_to_timestamp(until)
+
+ if filters:
+ filters = convert_filters(filters)
+
+ params = {
+ 'since': since,
+ 'until': until,
+ 'filters': filters
+ }
+ url = self._url('/events')
+
+ response = self._get(url, params=params, stream=True, timeout=None)
+ stream = self._stream_helper(response, decode=decode)
+
+ return CancellableStream(stream, response)
+
+ def info(self):
+ """
+ Display system-wide information. Identical to the ``docker info``
+ command.
+
+ Returns:
+ (dict): The info as a dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(self._get(self._url("/info")), True)
+
+ def login(self, username, password=None, email=None, registry=None,
+ reauth=False, dockercfg_path=None):
+ """
+ Authenticate with a registry. Similar to the ``docker login`` command.
+
+ Args:
+ username (str): The registry username
+ password (str): The plaintext password
+ email (str): The email for the registry account
+ registry (str): URL to the registry. E.g.
+ ``https://index.docker.io/v1/``
+ reauth (bool): Whether or not to refresh existing authentication on
+ the Docker server.
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+ otherwise ``$HOME/.dockercfg``)
+
+ Returns:
+ (dict): The response from the login request
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ # If we don't have any auth data so far, try reloading the config file
+ # one more time in case anything showed up in there.
+ # If dockercfg_path is passed check to see if the config file exists,
+ # if so load that config.
+ if dockercfg_path and os.path.exists(dockercfg_path):
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
+ elif not self._auth_configs or self._auth_configs.is_empty:
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
+
+ authcfg = self._auth_configs.resolve_authconfig(registry)
+ # If we found an existing auth config for this registry and username
+ # combination, we can return it immediately unless reauth is requested.
+ if authcfg and authcfg.get('username', None) == username \
+ and not reauth:
+ return authcfg
+
+ req_data = {
+ 'username': username,
+ 'password': password,
+ 'email': email,
+ 'serveraddress': registry,
+ }
+
+ response = self._post_json(self._url('/auth'), data=req_data)
+ if response.status_code == 200:
+ self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
+ return self._result(response, json=True)
+
+ def ping(self):
+ """
+ Checks the server is responsive. An exception will be raised if it
+ isn't responding.
+
+ Returns:
+ (bool) The response from the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(self._get(self._url('/_ping'))) == 'OK'
+
+    def version(self, api_version=True):
+        """
+        Returns version information from the server. Similar to the ``docker
+        version`` command.
+
+        Args:
+            api_version (bool): When ``False``, query the unversioned
+                ``/version`` endpoint. Used during automatic API version
+                detection, before the client's API version is known.
+
+        Returns:
+            (dict): The server version information
+
+        Raises:
+            :py:class:`docker.errors.APIError`
+                If the server returns an error.
+        """
+        url = self._url("/version", versioned_api=api_version)
+        return self._result(self._get(url), json=True)
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/auth.py b/ansible_collections/community/docker/plugins/module_utils/_api/auth.py
new file mode 100644
index 00000000..a172ced5
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/auth.py
@@ -0,0 +1,388 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import json
+import logging
+
+from ansible.module_utils.six import iteritems, string_types
+
+from . import errors
+from .credentials.store import Store
+from .credentials.errors import StoreError, CredentialsNotFound
+from .utils import config
+
+INDEX_NAME = 'docker.io'
+INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
+TOKEN_USERNAME = '<token>'
+
+log = logging.getLogger(__name__)
+
+
+def resolve_repository_name(repo_name):
+ if '://' in repo_name:
+ raise errors.InvalidRepository(
+ 'Repository name cannot contain a scheme ({0})'.format(repo_name)
+ )
+
+ index_name, remote_name = split_repo_name(repo_name)
+ if index_name[0] == '-' or index_name[-1] == '-':
+ raise errors.InvalidRepository(
+ 'Invalid index name ({0}). Cannot begin or end with a'
+ ' hyphen.'.format(index_name)
+ )
+ return resolve_index_name(index_name), remote_name
+
+
+def resolve_index_name(index_name):
+ index_name = convert_to_hostname(index_name)
+ if index_name == 'index.' + INDEX_NAME:
+ index_name = INDEX_NAME
+ return index_name
+
+
+def get_config_header(client, registry):
+ log.debug('Looking for auth config')
+ if not client._auth_configs or client._auth_configs.is_empty:
+ log.debug(
+ "No auth config in memory - loading from filesystem"
+ )
+ client._auth_configs = load_config(credstore_env=client.credstore_env)
+ authcfg = resolve_authconfig(
+ client._auth_configs, registry, credstore_env=client.credstore_env
+ )
+ # Do not fail here if no authentication exists for this
+ # specific registry as we can have a readonly pull. Just
+ # put the header if we can.
+ if authcfg:
+ log.debug('Found auth config')
+ # auth_config needs to be a dict in the format used by
+ # auth.py username , password, serveraddress, email
+ return encode_header(authcfg)
+ log.debug('No auth config found')
+ return None
+
+
+def split_repo_name(repo_name):
+ parts = repo_name.split('/', 1)
+ if len(parts) == 1 or (
+ '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
+ ):
+ # This is a docker index repo (ex: username/foobar or ubuntu)
+ return INDEX_NAME, repo_name
+ return tuple(parts)
+
+
+def get_credential_store(authconfig, registry):
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig)
+ return authconfig.get_credential_store(registry)
+
+
+class AuthConfig(dict):
+ def __init__(self, dct, credstore_env=None):
+ if 'auths' not in dct:
+ dct['auths'] = {}
+ self.update(dct)
+ self._credstore_env = credstore_env
+ self._stores = {}
+
+ @classmethod
+ def parse_auth(cls, entries, raise_on_error=False):
+ """
+ Parses authentication entries
+
+ Args:
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
+
+ Returns:
+ Authentication registry.
+ """
+
+ conf = {}
+ for registry, entry in iteritems(entries):
+ if not isinstance(entry, dict):
+ log.debug('Config entry for key %s is not auth config', registry)
+ # We sometimes fall back to parsing the whole config as if it
+ # was the auth config by itself, for legacy purposes. In that
+ # case, we fail silently and return an empty conf if any of the
+ # keys is not formatted properly.
+ if raise_on_error:
+ raise errors.InvalidConfigFile(
+ 'Invalid configuration for registry {0}'.format(
+ registry
+ )
+ )
+ return {}
+ if 'identitytoken' in entry:
+ log.debug('Found an IdentityToken entry for registry %s', registry)
+ conf[registry] = {
+ 'IdentityToken': entry['identitytoken']
+ }
+ continue # Other values are irrelevant if we have a token
+
+ if 'auth' not in entry:
+ # Starting with engine v1.11 (API 1.23), an empty dictionary is
+ # a valid value in the auths config.
+ # https://github.com/docker/compose/issues/3265
+ log.debug('Auth data for %s is absent. Client might be using a credentials store instead.', registry)
+ conf[registry] = {}
+ continue
+
+ username, password = decode_auth(entry['auth'])
+ log.debug('Found entry (registry=%s, username=%s)', repr(registry), repr(username))
+
+ conf[registry] = {
+ 'username': username,
+ 'password': password,
+ 'email': entry.get('email'),
+ 'serveraddress': registry,
+ }
+ return conf
+
+ @classmethod
+ def load_config(cls, config_path, config_dict, credstore_env=None):
+ """
+ Loads authentication data from a Docker configuration file in the given
+ root directory or if config_path is passed use given path.
+ Lookup priority:
+ explicit config_path parameter > DOCKER_CONFIG environment
+ variable > ~/.docker/config.json > ~/.dockercfg
+ """
+
+ if not config_dict:
+ config_file = config.find_config_file(config_path)
+
+ if not config_file:
+ return cls({}, credstore_env)
+ try:
+ with open(config_file) as f:
+ config_dict = json.load(f)
+ except (IOError, KeyError, ValueError) as e:
+ # Likely missing new Docker config file or it's in an
+ # unknown format, continue to attempt to read old location
+ # and format.
+ log.debug(e)
+ return cls(_load_legacy_config(config_file), credstore_env)
+
+ res = {}
+ if config_dict.get('auths'):
+ log.debug("Found 'auths' section")
+ res.update({
+ 'auths': cls.parse_auth(
+ config_dict.pop('auths'), raise_on_error=True
+ )
+ })
+ if config_dict.get('credsStore'):
+ log.debug("Found 'credsStore' section")
+ res.update({'credsStore': config_dict.pop('credsStore')})
+ if config_dict.get('credHelpers'):
+ log.debug("Found 'credHelpers' section")
+ res.update({'credHelpers': config_dict.pop('credHelpers')})
+ if res:
+ return cls(res, credstore_env)
+
+ log.debug(
+ "Couldn't find auth-related section ; attempting to interpret "
+ "as auth-only file"
+ )
+ return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
+
+ @property
+ def auths(self):
+ return self.get('auths', {})
+
+ @property
+ def creds_store(self):
+ return self.get('credsStore', None)
+
+ @property
+ def cred_helpers(self):
+ return self.get('credHelpers', {})
+
+ @property
+ def is_empty(self):
+ return (
+ not self.auths and not self.creds_store and not self.cred_helpers
+ )
+
+ def resolve_authconfig(self, registry=None):
+ """
+ Returns the authentication data from the given auth configuration for a
+ specific registry. As with the Docker client, legacy entries in the
+ config with full URLs are stripped down to hostnames before checking
+ for a match. Returns None if no match was found.
+ """
+
+ if self.creds_store or self.cred_helpers:
+ store_name = self.get_credential_store(registry)
+ if store_name is not None:
+ log.debug('Using credentials store "%s"', store_name)
+ cfg = self._resolve_authconfig_credstore(registry, store_name)
+ if cfg is not None:
+ return cfg
+ log.debug('No entry in credstore - fetching from auth dict')
+
+ # Default to the public index server
+ registry = resolve_index_name(registry) if registry else INDEX_NAME
+ log.debug("Looking for auth entry for %s", repr(registry))
+
+ if registry in self.auths:
+ log.debug("Found %s", repr(registry))
+ return self.auths[registry]
+
+ for key, conf in iteritems(self.auths):
+ if resolve_index_name(key) == registry:
+ log.debug("Found %s", repr(key))
+ return conf
+
+ log.debug("No entry found")
+ return None
+
+ def _resolve_authconfig_credstore(self, registry, credstore_name):
+ if not registry or registry == INDEX_NAME:
+ # The ecosystem is a little schizophrenic with index.docker.io VS
+ # docker.io - in that case, it seems the full URL is necessary.
+ registry = INDEX_URL
+ log.debug("Looking for auth entry for %s", repr(registry))
+ store = self._get_store_instance(credstore_name)
+ try:
+ data = store.get(registry)
+ res = {
+ 'ServerAddress': registry,
+ }
+ if data['Username'] == TOKEN_USERNAME:
+ res['IdentityToken'] = data['Secret']
+ else:
+ res.update({
+ 'Username': data['Username'],
+ 'Password': data['Secret'],
+ })
+ return res
+ except CredentialsNotFound:
+ log.debug('No entry found')
+ return None
+ except StoreError as e:
+ raise errors.DockerException(
+ 'Credentials store error: {0}'.format(repr(e))
+ )
+
+ def _get_store_instance(self, name):
+ if name not in self._stores:
+ self._stores[name] = Store(
+ name, environment=self._credstore_env
+ )
+ return self._stores[name]
+
+ def get_credential_store(self, registry):
+ if not registry or registry == INDEX_NAME:
+ registry = INDEX_URL
+
+ return self.cred_helpers.get(registry) or self.creds_store
+
+ def get_all_credentials(self):
+ auth_data = self.auths.copy()
+ if self.creds_store:
+ # Retrieve all credentials from the default store
+ store = self._get_store_instance(self.creds_store)
+ for k in store.list().keys():
+ auth_data[k] = self._resolve_authconfig_credstore(
+ k, self.creds_store
+ )
+ auth_data[convert_to_hostname(k)] = auth_data[k]
+
+ # credHelpers entries take priority over all others
+ for reg, store_name in self.cred_helpers.items():
+ auth_data[reg] = self._resolve_authconfig_credstore(
+ reg, store_name
+ )
+ auth_data[convert_to_hostname(reg)] = auth_data[reg]
+
+ return auth_data
+
+ def add_auth(self, reg, data):
+ self['auths'][reg] = data
+
+
+def resolve_authconfig(authconfig, registry=None, credstore_env=None):
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig, credstore_env)
+ return authconfig.resolve_authconfig(registry)
+
+
+def convert_to_hostname(url):
+ return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
+
+
+def decode_auth(auth):
+ if isinstance(auth, string_types):
+ auth = auth.encode('ascii')
+ s = base64.b64decode(auth)
+ login, pwd = s.split(b':', 1)
+ return login.decode('utf8'), pwd.decode('utf8')
+
+
+def encode_header(auth):
+ auth_json = json.dumps(auth).encode('ascii')
+ return base64.urlsafe_b64encode(auth_json)
+
+
+def parse_auth(entries, raise_on_error=False):
+ """
+ Parses authentication entries
+
+ Args:
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
+
+ Returns:
+ Authentication registry.
+ """
+
+ return AuthConfig.parse_auth(entries, raise_on_error)
+
+
+def load_config(config_path=None, config_dict=None, credstore_env=None):
+ return AuthConfig.load_config(config_path, config_dict, credstore_env)
+
+
+def _load_legacy_config(config_file):
+ log.debug("Attempting to parse legacy auth file format")
+ try:
+ data = []
+ with open(config_file) as f:
+ for line in f.readlines():
+ data.append(line.strip().split(' = ')[1])
+ if len(data) < 2:
+ # Not enough data
+ raise errors.InvalidConfigFile(
+ 'Invalid or empty configuration file!'
+ )
+
+ username, password = decode_auth(data[0])
+ return {'auths': {
+ INDEX_NAME: {
+ 'username': username,
+ 'password': password,
+ 'email': data[1],
+ 'serveraddress': INDEX_URL,
+ }
+ }}
+ except Exception as e:
+ log.debug(e)
+ pass
+
+ log.debug("All parsing attempts failed - returning empty config")
+ return {}
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/constants.py b/ansible_collections/community/docker/plugins/module_utils/_api/constants.py
new file mode 100644
index 00000000..23e10b13
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/constants.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+DEFAULT_DOCKER_API_VERSION = '1.41'
+MINIMUM_DOCKER_API_VERSION = '1.21'
+DEFAULT_TIMEOUT_SECONDS = 60
+STREAM_HEADER_SIZE_BYTES = 8
+CONTAINER_LIMITS_KEYS = [
+ 'memory', 'memswap', 'cpushares', 'cpusetcpus'
+]
+
+DEFAULT_HTTP_HOST = "127.0.0.1"
+DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
+DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
+
+BYTE_UNITS = {
+ 'b': 1,
+ 'k': 1024,
+ 'm': 1024 * 1024,
+ 'g': 1024 * 1024 * 1024
+}
+
+IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
+WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
+
+DEFAULT_USER_AGENT = "ansible-community.docker"
+DEFAULT_NUM_POOLS = 25
+
+# The OpenSSH server default value for MaxSessions is 10 which means we can
+# use up to 9, leaving the final session for the underlying SSH connection.
+# For more details see: https://github.com/docker/docker-py/issues/2246
+DEFAULT_NUM_POOLS_SSH = 9
+
+DEFAULT_MAX_POOL_SIZE = 10
+
+DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
+
+DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
+DEFAULT_SWARM_SUBNET_SIZE = 24
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/credentials/constants.py b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/constants.py
new file mode 100644
index 00000000..c52bc564
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/constants.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+PROGRAM_PREFIX = 'docker-credential-'
+DEFAULT_LINUX_STORE = 'secretservice'
+DEFAULT_OSX_STORE = 'osxkeychain'
+DEFAULT_WIN32_STORE = 'wincred'
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/credentials/errors.py b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/errors.py
new file mode 100644
index 00000000..0047e8e4
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/errors.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class StoreError(RuntimeError):
+ pass
+
+
+class CredentialsNotFound(StoreError):
+ pass
+
+
+class InitializationError(StoreError):
+ pass
+
+
+def process_store_error(cpe, program):
+ message = cpe.output.decode('utf-8')
+ if 'credentials not found in native keychain' in message:
+ return CredentialsNotFound(
+ 'No matching credentials in {0}'.format(
+ program
+ )
+ )
+ return StoreError(
+ 'Credentials store {0} exited with "{1}".'.format(
+ program, cpe.output.decode('utf-8').strip()
+ )
+ )
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/credentials/store.py b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/store.py
new file mode 100644
index 00000000..e1bd28e3
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/store.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import json
+import subprocess
+
+from ansible.module_utils.six import PY3, binary_type
+
+from . import constants
+from . import errors
+from .utils import create_environment_dict
+from .utils import find_executable
+
+
+class Store(object):
+ def __init__(self, program, environment=None):
+ """ Create a store object that acts as an interface to
+ perform the basic operations for storing, retrieving
+ and erasing credentials using `program`.
+ """
+ self.program = constants.PROGRAM_PREFIX + program
+ self.exe = find_executable(self.program)
+ self.environment = environment
+ if self.exe is None:
+ raise errors.InitializationError(
+ '{0} not installed or not available in PATH'.format(
+ self.program
+ )
+ )
+
+ def get(self, server):
+ """ Retrieve credentials for `server`. If no credentials are found,
+ a `StoreError` will be raised.
+ """
+ if not isinstance(server, binary_type):
+ server = server.encode('utf-8')
+ data = self._execute('get', server)
+ result = json.loads(data.decode('utf-8'))
+
+ # docker-credential-pass will return an object for inexistent servers
+ # whereas other helpers will exit with returncode != 0. For
+ # consistency, if no significant data is returned,
+ # raise CredentialsNotFound
+ if result['Username'] == '' and result['Secret'] == '':
+ raise errors.CredentialsNotFound(
+ 'No matching credentials in {0}'.format(self.program)
+ )
+
+ return result
+
+ def store(self, server, username, secret):
+ """ Store credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ data_input = json.dumps({
+ 'ServerURL': server,
+ 'Username': username,
+ 'Secret': secret
+ }).encode('utf-8')
+ return self._execute('store', data_input)
+
+ def erase(self, server):
+ """ Erase credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ if not isinstance(server, binary_type):
+ server = server.encode('utf-8')
+ self._execute('erase', server)
+
+ def list(self):
+ """ List stored credentials. Requires v0.4.0+ of the helper.
+ """
+ data = self._execute('list', None)
+ return json.loads(data.decode('utf-8'))
+
+ def _execute(self, subcmd, data_input):
+ output = None
+ env = create_environment_dict(self.environment)
+ try:
+ if PY3:
+ output = subprocess.check_output(
+ [self.exe, subcmd], input=data_input, env=env,
+ )
+ else:
+ process = subprocess.Popen(
+ [self.exe, subcmd], stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, env=env,
+ )
+ output, dummy = process.communicate(data_input)
+ if process.returncode != 0:
+ raise subprocess.CalledProcessError(
+ returncode=process.returncode, cmd='', output=output
+ )
+ except subprocess.CalledProcessError as e:
+ raise errors.process_store_error(e, self.program)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise errors.StoreError(
+ '{0} not installed or not available in PATH'.format(
+ self.program
+ )
+ )
+ else:
+ raise errors.StoreError(
+ 'Unexpected OS error "{0}", errno={1}'.format(
+ e.strerror, e.errno
+ )
+ )
+ return output
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/credentials/utils.py b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/utils.py
new file mode 100644
index 00000000..1ab84fe5
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/utils.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from ansible.module_utils.six import PY2
+
+if PY2:
+ from distutils.spawn import find_executable as which
+else:
+ from shutil import which
+
+
+def find_executable(executable, path=None):
+ """
+ As distutils.spawn.find_executable, but on Windows, look up
+ every extension declared in PATHEXT instead of just `.exe`
+ """
+ if not PY2:
+ # shutil.which() already uses PATHEXT on Windows, so on
+ # Python 3 we can simply use shutil.which() in all cases.
+ # (https://github.com/docker/docker-py/commit/42789818bed5d86b487a030e2e60b02bf0cfa284)
+ return which(executable, path=path)
+
+ if sys.platform != 'win32':
+ return which(executable, path)
+
+ if path is None:
+ path = os.environ['PATH']
+
+ paths = path.split(os.pathsep)
+ extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
+ base, ext = os.path.splitext(executable)
+
+ if not os.path.isfile(executable):
+ for p in paths:
+ for ext in extensions:
+ f = os.path.join(p, base + ext)
+ if os.path.isfile(f):
+ return f
+ return None
+ else:
+ return executable
+
+
+def create_environment_dict(overrides):
+ """
+ Create and return a copy of os.environ with the specified overrides
+ """
+ result = os.environ.copy()
+ result.update(overrides or {})
+ return result
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/errors.py b/ansible_collections/community/docker/plugins/module_utils/_api/errors.py
new file mode 100644
index 00000000..90dd5aad
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/errors.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ._import_helper import HTTPError as _HTTPError
+
+from ansible.module_utils.six import raise_from
+
+
+class DockerException(Exception):
+ """
+ A base class from which all other exceptions inherit.
+
+ If you want to catch all errors that the Docker SDK might raise,
+ catch this base exception.
+ """
+
+
+def create_api_error_from_http_exception(e):
+ """
+ Create a suitable APIError from requests.exceptions.HTTPError.
+ """
+ response = e.response
+ try:
+ explanation = response.json()['message']
+ except ValueError:
+ explanation = (response.content or '').strip()
+ cls = APIError
+ if response.status_code == 404:
+ if explanation and ('No such image' in str(explanation) or
+ 'not found: does not exist or no pull access'
+ in str(explanation) or
+ 'repository does not exist' in str(explanation)):
+ cls = ImageNotFound
+ else:
+ cls = NotFound
+ raise_from(cls(e, response=response, explanation=explanation), e)
+
+
+class APIError(_HTTPError, DockerException):
+ """
+ An HTTP error from the API.
+ """
+ def __init__(self, message, response=None, explanation=None):
+ # requests 1.2 supports response as a keyword argument, but
+ # requests 1.1 doesn't
+ super(APIError, self).__init__(message)
+ self.response = response
+ self.explanation = explanation
+
+ def __str__(self):
+ message = super(APIError, self).__str__()
+
+ if self.is_client_error():
+ message = '{0} Client Error for {1}: {2}'.format(
+ self.response.status_code, self.response.url,
+ self.response.reason)
+
+ elif self.is_server_error():
+ message = '{0} Server Error for {1}: {2}'.format(
+ self.response.status_code, self.response.url,
+ self.response.reason)
+
+ if self.explanation:
+ message = '{0} ("{1}")'.format(message, self.explanation)
+
+ return message
+
+ @property
+ def status_code(self):
+ if self.response is not None:
+ return self.response.status_code
+
+ def is_error(self):
+ return self.is_client_error() or self.is_server_error()
+
+ def is_client_error(self):
+ if self.status_code is None:
+ return False
+ return 400 <= self.status_code < 500
+
+ def is_server_error(self):
+ if self.status_code is None:
+ return False
+ return 500 <= self.status_code < 600
+
+
+class NotFound(APIError):
+ pass
+
+
+class ImageNotFound(NotFound):
+ pass
+
+
+class InvalidVersion(DockerException):
+ pass
+
+
+class InvalidRepository(DockerException):
+ pass
+
+
+class InvalidConfigFile(DockerException):
+ pass
+
+
+class InvalidArgument(DockerException):
+ pass
+
+
+class DeprecatedMethod(DockerException):
+ pass
+
+
+class TLSParameterError(DockerException):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg + (". TLS configurations should map the Docker CLI "
+ "client configurations. See "
+ "https://docs.docker.com/engine/articles/https/ "
+ "for API details.")
+
+
+class NullResource(DockerException, ValueError):
+ pass
+
+
+class ContainerError(DockerException):
+ """
+ Represents a container that has exited with a non-zero exit code.
+ """
+ def __init__(self, container, exit_status, command, image, stderr):
+ self.container = container
+ self.exit_status = exit_status
+ self.command = command
+ self.image = image
+ self.stderr = stderr
+
+ err = ": {0}".format(stderr) if stderr is not None else ""
+ msg = ("Command '{0}' in image '{1}' returned non-zero exit "
+ "status {2}{3}").format(command, image, exit_status, err)
+
+ super(ContainerError, self).__init__(msg)
+
+
+class StreamParseError(RuntimeError):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class BuildError(DockerException):
+ def __init__(self, reason, build_log):
+ super(BuildError, self).__init__(reason)
+ self.msg = reason
+ self.build_log = build_log
+
+
+class ImageLoadError(DockerException):
+ pass
+
+
+def create_unexpected_kwargs_error(name, kwargs):
+ quoted_kwargs = ["'{0}'".format(k) for k in sorted(kwargs)]
+ text = ["{0}() ".format(name)]
+ if len(quoted_kwargs) == 1:
+ text.append("got an unexpected keyword argument ")
+ else:
+ text.append("got unexpected keyword arguments ")
+ text.append(', '.join(quoted_kwargs))
+ return TypeError(''.join(text))
+
+
+class MissingContextParameter(DockerException):
+ def __init__(self, param):
+ self.param = param
+
+ def __str__(self):
+ return ("missing parameter: {0}".format(self.param))
+
+
+class ContextAlreadyExists(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return ("context {0} already exists".format(self.name))
+
+
+class ContextException(DockerException):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return (self.msg)
+
+
+class ContextNotFound(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return ("context '{0}' not found".format(self.name))
+
+
+class MissingRequirementException(DockerException):
+ def __init__(self, msg, requirement, import_exception):
+ self.msg = msg
+ self.requirement = requirement
+ self.import_exception = import_exception
+
+ def __str__(self):
+ return (self.msg)
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/tls.py b/ansible_collections/community/docker/plugins/module_utils/_api/tls.py
new file mode 100644
index 00000000..ed5416d8
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/tls.py
@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import ssl
+import sys
+
+from . import errors
+from .transport.ssladapter import SSLHTTPAdapter
+
+
+class TLSConfig(object):
+ """
+ TLS configuration.
+
+ Args:
+ client_cert (tuple of str): Path to client cert, path to client key.
+ ca_cert (str): Path to CA cert file.
+ verify (bool or str): This can be ``False`` or a path to a CA cert
+ file.
+ ssl_version (int): A valid `SSL version`_.
+ assert_hostname (bool): Verify the hostname of the server.
+
+ .. _`SSL version`:
+ https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
+ """
+ cert = None
+ ca_cert = None
+ verify = None
+ ssl_version = None
+
+ def __init__(self, client_cert=None, ca_cert=None, verify=None,
+ ssl_version=None, assert_hostname=None,
+ assert_fingerprint=None):
+ # Argument compatibility/mapping with
+ # https://docs.docker.com/engine/articles/https/
+ # This diverges from the Docker CLI in that users can specify 'tls'
+ # here, but also disable any public/default CA pool verification by
+ # leaving verify=False
+
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+
+ # If the user provides an SSL version, we should use their preference
+ if ssl_version:
+ self.ssl_version = ssl_version
+ elif (sys.version_info.major, sys.version_info.minor) < (3, 6):
+ # If the user provides no ssl version, we should default to
+ # TLSv1_2. This option is the most secure, and will work for the
+ # majority of users with reasonably up-to-date software. However,
+ # before doing so, detect openssl version to ensure we can support
+ # it.
+ if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr(
+ ssl, 'PROTOCOL_TLSv1_2'):
+ # If the OpenSSL version is high enough to support TLSv1_2,
+ # then we should use it.
+ self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2')
+ else:
+ # Otherwise, TLS v1.0 seems to be the safest default;
+ # SSLv23 fails in mysterious ways:
+ # https://github.com/docker/docker-py/issues/963
+ self.ssl_version = ssl.PROTOCOL_TLSv1
+ else:
+ self.ssl_version = ssl.PROTOCOL_TLS_CLIENT
+
+ # "client_cert" must have both or neither cert/key files. In
+ # either case, Alert the user when both are expected, but any are
+ # missing.
+
+ if client_cert:
+ try:
+ tls_cert, tls_key = client_cert
+ except ValueError:
+ raise errors.TLSParameterError(
+ 'client_cert must be a tuple of'
+ ' (client certificate, key file)'
+ )
+
+ if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
+ not os.path.isfile(tls_key)):
+ raise errors.TLSParameterError(
+ 'Path to a certificate and key files must be provided'
+ ' through the client_cert param'
+ )
+ self.cert = (tls_cert, tls_key)
+
+ # If verify is set, make sure the cert exists
+ self.verify = verify
+ self.ca_cert = ca_cert
+ if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
+ raise errors.TLSParameterError(
+ 'Invalid CA certificate provided for `ca_cert`.'
+ )
+
+ def configure_client(self, client):
+ """
+ Configure a client with these TLS options.
+ """
+ client.ssl_version = self.ssl_version
+
+ if self.verify and self.ca_cert:
+ client.verify = self.ca_cert
+ else:
+ client.verify = self.verify
+
+ if self.cert:
+ client.cert = self.cert
+
+ client.mount('https://', SSLHTTPAdapter(
+ ssl_version=self.ssl_version,
+ assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint,
+ ))
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/basehttpadapter.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/basehttpadapter.py
new file mode 100644
index 00000000..2afa60ae
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/basehttpadapter.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .._import_helper import HTTPAdapter as _HTTPAdapter
+
+
class BaseHTTPAdapter(_HTTPAdapter):
    # Common base for the transport adapters in this package: on close(),
    # also dispose of the per-URL connection pools that subclasses keep in
    # ``self.pools`` (a RecentlyUsedContainer whose dispose_func closes
    # each evicted pool).
    def close(self):
        # Let requests' HTTPAdapter tear down its own pool manager first.
        super(BaseHTTPAdapter, self).close()
        # Not every subclass defines ``pools``, hence the hasattr() guard.
        if hasattr(self, 'pools'):
            self.pools.clear()
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipeconn.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipeconn.py
new file mode 100644
index 00000000..72a5c589
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipeconn.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six import PY3
+from ansible.module_utils.six.moves.queue import Empty
+
+from .. import constants
+from .._import_helper import HTTPAdapter, urllib3
+
+from .basehttpadapter import BaseHTTPAdapter
+from .npipesocket import NpipeSocket
+
+if PY3:
+ import http.client as httplib
+else:
+ import httplib
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
class NpipeHTTPConnection(httplib.HTTPConnection, object):
    """HTTP connection tunnelled over a Windows named pipe."""

    def __init__(self, npipe_path, timeout=60):
        # 'localhost' is only a placeholder required by HTTPConnection;
        # routing actually happens via the pipe path.
        super(NpipeHTTPConnection, self).__init__('localhost', timeout=timeout)
        self.npipe_path = npipe_path
        self.timeout = timeout

    def connect(self):
        """Open the named pipe and install it as the underlying socket."""
        pipe_sock = NpipeSocket()
        pipe_sock.settimeout(self.timeout)
        pipe_sock.connect(self.npipe_path)
        self.sock = pipe_sock
+
+
class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """Connection pool that produces named-pipe backed HTTP connections."""

    def __init__(self, npipe_path, timeout=60, maxsize=10):
        super(NpipeHTTPConnectionPool, self).__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.npipe_path = npipe_path
        self.timeout = timeout

    def _new_conn(self):
        """Build a fresh named-pipe connection."""
        return NpipeHTTPConnection(self.npipe_path, self.timeout)

    # urllib3's stock _get_conn() calls select() on idle sockets before
    # reuse, which NpipeSocket cannot support and which crashes; override
    # the method to skip that liveness check entirely.
    def _get_conn(self, timeout):
        connection = None
        try:
            connection = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError:
            # self.pool becomes None once the pool has been closed.
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
        except Empty:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more "
                    "connections are allowed."
                )
            # Non-blocking mode: fall through and build a new connection.

        return connection or self._new_conn()
+
+
class NpipeHTTPAdapter(BaseHTTPAdapter):
    """Transport adapter that routes requests over ``npipe://`` URLs."""

    __attrs__ = HTTPAdapter.__attrs__ + ['npipe_path',
                                         'pools',
                                         'timeout',
                                         'max_pool_size']

    def __init__(self, base_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
        self.npipe_path = base_url.replace('npipe://', '')
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        # LRU container of per-URL pools; evicted pools are closed eagerly.
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super(NpipeHTTPAdapter, self).__init__()

    def get_connection(self, url, proxies=None):
        """Return (creating and caching if needed) the pool for ``url``."""
        with self.pools.lock:
            existing = self.pools.get(url)
            if existing:
                return existing

            new_pool = NpipeHTTPConnectionPool(
                self.npipe_path, self.timeout,
                maxsize=self.max_pool_size
            )
            self.pools[url] = new_pool

        return new_pool

    def request_url(self, request, proxies):
        # requests' select_proxy() errors out on URLs without a hostname,
        # and proxies are meaningless for named pipes anyway, so hand back
        # the bare path URL directly.
        # See also: https://github.com/docker/docker-sdk-python/issues/811
        return request.path_url
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipesocket.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipesocket.py
new file mode 100644
index 00000000..f9d3ed66
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipesocket.py
@@ -0,0 +1,236 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import functools
+import io
+import time
+import traceback
+
+from ansible.module_utils.six import PY2
+
+PYWIN32_IMPORT_ERROR = None
+try:
+ import win32file
+ import win32pipe
+except ImportError:
+ PYWIN32_IMPORT_ERROR = traceback.format_exc()
+
+
cERROR_PIPE_BUSY = 0xe7            # Win32 ERROR_PIPE_BUSY (231): all pipe instances busy
cSECURITY_SQOS_PRESENT = 0x100000  # CreateFile flag: honor the SQOS impersonation bits
cSECURITY_ANONYMOUS = 0            # impersonation level: anonymous

MAXIMUM_RETRY_COUNT = 10           # connect() retries (1s apart) while the pipe is busy
+
+
def check_closed(f):
    """Decorator guarding socket methods against use after close.

    Raises ``RuntimeError`` when the wrapped method is invoked after the
    instance's ``_closed`` flag has been set.
    """
    @functools.wraps(f)
    def guarded(self, *args, **kwargs):
        if self._closed:
            raise RuntimeError(
                'Can not reuse socket after connection was closed.'
            )
        return f(self, *args, **kwargs)
    return guarded
+
+
class NpipeSocket(object):
    """ Partial implementation of the socket API over windows named pipes.
    This implementation is only designed to be used as a client socket,
    and server-specific methods (bind, listen, accept...) are not
    implemented.
    """

    def __init__(self, handle=None):
        # Start with the system-default pipe wait; settimeout() overrides.
        self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
        self._handle = handle
        self._closed = False

    def accept(self):
        # Server-side API; this is a client-only socket.
        raise NotImplementedError()

    def bind(self, address):
        # Server-side API; this is a client-only socket.
        raise NotImplementedError()

    def close(self):
        # Closing the Win32 handle ends the pipe session; any subsequent
        # call through a @check_closed method raises RuntimeError.
        self._handle.Close()
        self._closed = True

    @check_closed
    def connect(self, address, retry_count=0):
        """Open the named pipe at ``address`` (e.g. r'\\\\.\\pipe\\docker_engine'),
        retrying up to MAXIMUM_RETRY_COUNT times while the pipe is busy."""
        try:
            handle = win32file.CreateFile(
                address,
                win32file.GENERIC_READ | win32file.GENERIC_WRITE,
                0,
                None,
                win32file.OPEN_EXISTING,
                cSECURITY_ANONYMOUS | cSECURITY_SQOS_PRESENT,
                0
            )
        except win32pipe.error as e:
            # See Remarks:
            # https://msdn.microsoft.com/en-us/library/aa365800.aspx
            if e.winerror == cERROR_PIPE_BUSY:
                # Another program or thread has grabbed our pipe instance
                # before we got to it. Wait for availability and attempt to
                # connect again.
                retry_count = retry_count + 1
                if (retry_count < MAXIMUM_RETRY_COUNT):
                    time.sleep(1)
                    return self.connect(address, retry_count)
            raise e

        self.flags = win32pipe.GetNamedPipeInfo(handle)[0]

        self._handle = handle
        self._address = address

    @check_closed
    def connect_ex(self, address):
        # NOTE(review): unlike socket.connect_ex this does not return an
        # error code; failures propagate as exceptions (upstream behavior).
        return self.connect(address)

    @check_closed
    def detach(self):
        # Hand ownership of the raw handle to the caller.
        self._closed = True
        return self._handle

    @check_closed
    def dup(self):
        # The duplicate shares the same underlying handle.
        return NpipeSocket(self._handle)

    def getpeername(self):
        return self._address

    def getsockname(self):
        return self._address

    def getsockopt(self, level, optname, buflen=None):
        raise NotImplementedError()

    def ioctl(self, control, option):
        raise NotImplementedError()

    def listen(self, backlog):
        raise NotImplementedError()

    def makefile(self, mode=None, bufsize=None):
        # Only buffered binary reading ('r'/'rb') is supported.
        if mode.strip('b') != 'r':
            raise NotImplementedError()
        rawio = NpipeFileIOBase(self)
        if bufsize is None or bufsize <= 0:
            bufsize = io.DEFAULT_BUFFER_SIZE
        return io.BufferedReader(rawio, buffer_size=bufsize)

    @check_closed
    def recv(self, bufsize, flags=0):
        # ``flags`` is accepted for socket API compatibility but ignored.
        err, data = win32file.ReadFile(self._handle, bufsize)
        return data

    @check_closed
    def recvfrom(self, bufsize, flags=0):
        data = self.recv(bufsize, flags)
        return (data, self._address)

    @check_closed
    def recvfrom_into(self, buf, nbytes=0, flags=0):
        return self.recv_into(buf, nbytes, flags), self._address

    @check_closed
    def recv_into(self, buf, nbytes=0, flags=0):
        # BUGFIX: ``flags`` added (ignored, like in recv()) so that
        # recvfrom_into(), which forwards it, no longer raises TypeError.
        if PY2:
            return self._recv_into_py2(buf, nbytes)

        readbuf = buf
        if not isinstance(buf, memoryview):
            readbuf = memoryview(buf)

        err, data = win32file.ReadFile(
            self._handle,
            readbuf[:nbytes] if nbytes else readbuf
        )
        return len(data)

    def _recv_into_py2(self, buf, nbytes):
        # Python 2 fallback: read into a fresh buffer and copy back.
        err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
        n = len(data)
        buf[:n] = data
        return n

    @check_closed
    def send(self, string, flags=0):
        # ``flags`` ignored; WriteFile has no equivalent.
        err, nbytes = win32file.WriteFile(self._handle, string)
        return nbytes

    @check_closed
    def sendall(self, string, flags=0):
        # WriteFile writes the whole buffer, so send() already sends all.
        return self.send(string, flags)

    @check_closed
    def sendto(self, string, address):
        # Datagram-style call: connect to the pipe first, then write.
        self.connect(address)
        return self.send(string)

    def setblocking(self, flag):
        if flag:
            return self.settimeout(None)
        return self.settimeout(0)

    def settimeout(self, value):
        if value is None:
            # Blocking mode
            self._timeout = win32pipe.NMPWAIT_WAIT_FOREVER
        elif not isinstance(value, (float, int)) or value < 0:
            raise ValueError('Timeout value out of range')
        elif value == 0:
            # Non-blocking mode
            self._timeout = win32pipe.NMPWAIT_NO_WAIT
        else:
            # Timeout mode - Value converted to milliseconds
            self._timeout = value * 1000

    def gettimeout(self):
        return self._timeout

    def setsockopt(self, level, optname, value):
        raise NotImplementedError()

    @check_closed
    def shutdown(self, how):
        # Named pipes have no half-close; any shutdown closes the handle.
        return self.close()
+
+
class NpipeFileIOBase(io.RawIOBase):
    # Read-only raw-IO view over an NpipeSocket; used by
    # NpipeSocket.makefile() as the backing for an io.BufferedReader.
    def __init__(self, npipe_socket):
        self.sock = npipe_socket

    def close(self):
        super(NpipeFileIOBase, self).close()
        # Drop the reference only; the socket itself is closed by its owner.
        self.sock = None

    def fileno(self):
        return self.sock.fileno()

    def isatty(self):
        return False

    def readable(self):
        return True

    def readinto(self, buf):
        # Delegate directly to the socket; returns the byte count read.
        return self.sock.recv_into(buf)

    def seekable(self):
        return False

    def writable(self):
        return False
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/sshconn.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/sshconn.py
new file mode 100644
index 00000000..063c2882
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/sshconn.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import os
+import signal
+import socket
+import subprocess
+import traceback
+
+from ansible.module_utils.six import PY3
+from ansible.module_utils.six.moves.queue import Empty
+from ansible.module_utils.six.moves.urllib_parse import urlparse
+
+from .basehttpadapter import BaseHTTPAdapter
+from .. import constants
+
+if PY3:
+ import http.client as httplib
+else:
+ import httplib
+
+from .._import_helper import HTTPAdapter, urllib3
+
+PARAMIKO_IMPORT_ERROR = None
+try:
+ import paramiko
+except ImportError:
+ PARAMIKO_IMPORT_ERROR = traceback.format_exc()
+
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
class SSHSocket(socket.socket):
    """Socket-like object that shells out to the local ``ssh`` binary and
    speaks the Docker protocol over the subprocess' stdin/stdout via
    ``docker system dial-stdio``.
    """

    def __init__(self, host):
        super(SSHSocket, self).__init__(
            socket.AF_INET, socket.SOCK_STREAM)
        self.host = host
        self.port = None
        self.user = None
        # Peel the optional ":port" suffix, then the optional "user@" prefix.
        if ':' in self.host:
            self.host, self.port = self.host.split(':')
        if '@' in self.host:
            self.user, self.host = self.host.split('@')

        self.proc = None

    def connect(self, **kwargs):
        """Spawn the ssh subprocess that tunnels to the remote daemon."""
        cmd = ['ssh']
        if self.user:
            cmd = cmd + ['-l', self.user]

        if self.port:
            cmd = cmd + ['-p', self.port]

        cmd = cmd + ['--', self.host, 'docker system dial-stdio']

        preexec_func = None
        if not constants.IS_WINDOWS_PLATFORM:
            # Shield the ssh child from Ctrl-C aimed at the parent process.
            def ignore_sigint():
                signal.signal(signal.SIGINT, signal.SIG_IGN)
            preexec_func = ignore_sigint

        env = dict(os.environ)

        # drop LD_LIBRARY_PATH and SSL_CERT_FILE
        env.pop('LD_LIBRARY_PATH', None)
        env.pop('SSL_CERT_FILE', None)

        self.proc = subprocess.Popen(
            cmd,
            env=env,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            preexec_fn=preexec_func)

    def _write(self, data):
        if not self.proc or self.proc.stdin.closed:
            raise Exception('SSH subprocess not initiated.'
                            'connect() must be called first.')
        count = self.proc.stdin.write(data)
        self.proc.stdin.flush()
        return count

    def sendall(self, data):
        self._write(data)

    def send(self, data):
        return self._write(data)

    def recv(self, n):
        if not self.proc:
            raise Exception('SSH subprocess not initiated.'
                            'connect() must be called first.')
        return self.proc.stdout.read(n)

    def makefile(self, mode):
        if not self.proc:
            self.connect()
        if PY3:
            # Tag the stream so callers can find their way back to us.
            self.proc.stdout.channel = self

        return self.proc.stdout

    def close(self):
        if not self.proc or self.proc.stdin.closed:
            return
        # A blank line tells dial-stdio to wind down before we terminate.
        self.proc.stdin.write(b'\n\n')
        self.proc.stdin.flush()
        self.proc.terminate()
+
+
class SSHConnection(httplib.HTTPConnection, object):
    """HTTP connection over SSH — via a paramiko transport when one is
    available, otherwise by shelling out to the local ssh client."""

    def __init__(self, ssh_transport=None, timeout=60, host=None):
        super(SSHConnection, self).__init__('localhost', timeout=timeout)
        self.ssh_transport = ssh_transport
        self.timeout = timeout
        self.ssh_host = host

    def connect(self):
        """Establish the tunnel and install it as the underlying socket."""
        if self.ssh_transport:
            # Paramiko path: run dial-stdio over a dedicated SSH channel.
            channel = self.ssh_transport.open_session()
            channel.settimeout(self.timeout)
            channel.exec_command('docker system dial-stdio')
            self.sock = channel
        else:
            # Shell-out path: spawn the system ssh binary.
            shell_sock = SSHSocket(self.ssh_host)
            shell_sock.settimeout(self.timeout)
            shell_sock.connect()
            self.sock = shell_sock
+
+
class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """Pool of SSH-tunnelled HTTP connections."""

    scheme = 'ssh'

    def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
        super(SSHConnectionPool, self).__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.ssh_transport = None
        self.timeout = timeout
        if ssh_client:
            self.ssh_transport = ssh_client.get_transport()
        self.ssh_host = host

    def _new_conn(self):
        """Build a fresh SSH-backed connection."""
        return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)

    # urllib3's default _get_conn() calls fileno() on pooled connections,
    # which would quickly exhaust our fd limit on SSH channels; override
    # the method to skip that check.
    def _get_conn(self, timeout):
        connection = None
        try:
            connection = self.pool.get(block=self.block, timeout=timeout)
        except AttributeError:
            # self.pool becomes None once the pool has been closed.
            raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
        except Empty:
            if self.block:
                raise urllib3.exceptions.EmptyPoolError(
                    self,
                    "Pool reached maximum size and no more "
                    "connections are allowed."
                )
            # Non-blocking mode: fall through and build a new connection.

        return connection or self._new_conn()
+
+
class SSHHTTPAdapter(BaseHTTPAdapter):
    # Transport adapter for ssh:// base URLs. Two modes: a paramiko
    # SSHClient (default), or shelling out to the local ssh binary when
    # ``shell_out=True``.

    __attrs__ = HTTPAdapter.__attrs__ + [
        'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
    ]

    def __init__(self, base_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
                 shell_out=False):
        self.ssh_client = None
        if not shell_out:
            # Paramiko mode: build the client and connect immediately.
            self._create_paramiko_client(base_url)
            self._connect()

        # For shell-out mode only the "user@host:port" part is needed.
        self.ssh_host = base_url
        if base_url.startswith('ssh://'):
            self.ssh_host = base_url[len('ssh://'):]

        self.timeout = timeout
        self.max_pool_size = max_pool_size
        # LRU container of per-URL pools; evicted pools are closed eagerly.
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super(SSHHTTPAdapter, self).__init__()

    def _create_paramiko_client(self, base_url):
        # Derive connection parameters from the URL, then let the user's
        # ~/.ssh/config fill in gaps (URL port/user take precedence).
        logging.getLogger("paramiko").setLevel(logging.WARNING)
        self.ssh_client = paramiko.SSHClient()
        base_url = urlparse(base_url)
        self.ssh_params = {
            "hostname": base_url.hostname,
            "port": base_url.port,
            "username": base_url.username,
        }
        ssh_config_file = os.path.expanduser("~/.ssh/config")
        if os.path.exists(ssh_config_file):
            conf = paramiko.SSHConfig()
            with open(ssh_config_file) as f:
                conf.parse(f)
            host_config = conf.lookup(base_url.hostname)
            if 'proxycommand' in host_config:
                self.ssh_params["sock"] = paramiko.ProxyCommand(
                    host_config['proxycommand']
                )
            if 'hostname' in host_config:
                self.ssh_params['hostname'] = host_config['hostname']
            # URL components win over ssh_config for port and username.
            if base_url.port is None and 'port' in host_config:
                self.ssh_params['port'] = host_config['port']
            if base_url.username is None and 'user' in host_config:
                self.ssh_params['username'] = host_config['user']
            if 'identityfile' in host_config:
                self.ssh_params['key_filename'] = host_config['identityfile']

        self.ssh_client.load_system_host_keys()
        # Unknown host keys are rejected rather than auto-added.
        self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())

    def _connect(self):
        # No-op in shell-out mode (no paramiko client).
        if self.ssh_client:
            self.ssh_client.connect(**self.ssh_params)

    def get_connection(self, url, proxies=None):
        # NOTE(review): in shell-out mode a brand-new pool is created on
        # every call and never cached in self.pools — TODO confirm this is
        # the intended upstream behavior.
        if not self.ssh_client:
            return SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host
            )
        with self.pools.lock:
            pool = self.pools.get(url)
            if pool:
                return pool

            # Connection is closed try a reconnect
            if self.ssh_client and not self.ssh_client.get_transport():
                self._connect()

            pool = SSHConnectionPool(
                ssh_client=self.ssh_client,
                timeout=self.timeout,
                maxsize=self.max_pool_size,
                host=self.ssh_host
            )
            self.pools[url] = pool

        return pool

    def close(self):
        # Dispose pooled connections (BaseHTTPAdapter), then the SSH client.
        super(SSHHTTPAdapter, self).close()
        if self.ssh_client:
            self.ssh_client.close()
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py
new file mode 100644
index 00000000..e1b5ce02
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+""" Resolves OpenSSL issues in some servers:
+ https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
+ https://github.com/kennethreitz/requests/pull/799
+"""
+
+from ansible_collections.community.docker.plugins.module_utils.version import StrictVersion
+
+from .._import_helper import HTTPAdapter, urllib3
+from .basehttpadapter import BaseHTTPAdapter
+
+
+PoolManager = urllib3.poolmanager.PoolManager
+
+
class SSLHTTPAdapter(BaseHTTPAdapter):
    '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''

    __attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
                                         'assert_hostname',
                                         'ssl_version']

    def __init__(self, ssl_version=None, assert_hostname=None,
                 assert_fingerprint=None, **kwargs):
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        super(SSLHTTPAdapter, self).__init__(**kwargs)

    def init_poolmanager(self, connections, maxsize, block=False):
        """Create the pool manager, forcing our TLS settings onto it."""
        pool_kwargs = {
            'num_pools': connections,
            'maxsize': maxsize,
            'block': block,
            'assert_hostname': self.assert_hostname,
            'assert_fingerprint': self.assert_fingerprint,
        }
        # Only pass ssl_version when the installed urllib3 understands it.
        if self.ssl_version and self.can_override_ssl_version():
            pool_kwargs['ssl_version'] = self.ssl_version

        self.poolmanager = PoolManager(**pool_kwargs)

    def get_connection(self, *args, **kwargs):
        """
        Ensure assert_hostname is set correctly on our pool

        We already take care of a normal poolmanager via init_poolmanager

        But we still need to take care of when there is a proxy poolmanager
        """
        conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)
        if conn.assert_hostname != self.assert_hostname:
            conn.assert_hostname = self.assert_hostname
        return conn

    def can_override_ssl_version(self):
        """True when the installed urllib3 accepts an ssl_version kwarg."""
        installed = urllib3.__version__.split('-')[0]
        if installed is None:
            return False
        if installed == 'dev':
            return True
        return StrictVersion(installed) > StrictVersion('1.5')
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/unixconn.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/unixconn.py
new file mode 100644
index 00000000..f46372f9
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/unixconn.py
@@ -0,0 +1,123 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import socket
+
+from ansible.module_utils.six import PY2
+from ansible.module_utils.six.moves import http_client as httplib
+
+from .basehttpadapter import BaseHTTPAdapter
+from .. import constants
+
+from .._import_helper import HTTPAdapter, urllib3
+
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
class UnixHTTPResponse(httplib.HTTPResponse, object):
    # HTTP response read from a UNIX domain socket. Supports switching off
    # buffering for connection-upgrade (hijacked) streams on Python 2.
    def __init__(self, sock, *args, **kwargs):
        # Pop our private kwarg before handing the rest to HTTPResponse.
        disable_buffering = kwargs.pop('disable_buffering', False)
        if PY2:
            # FIXME: We may need to disable buffering on Py3 as well,
            # but there's no clear way to do it at the moment. See:
            # https://github.com/docker/docker-py/issues/1799
            kwargs['buffering'] = not disable_buffering
        super(UnixHTTPResponse, self).__init__(sock, *args, **kwargs)
+
+
class UnixHTTPConnection(httplib.HTTPConnection, object):
    """HTTP connection over a UNIX domain socket."""

    def __init__(self, base_url, unix_socket, timeout=60):
        # 'localhost' is a placeholder; routing happens via the socket path.
        super(UnixHTTPConnection, self).__init__('localhost', timeout=timeout)
        self.base_url = base_url
        self.unix_socket = unix_socket
        self.timeout = timeout
        self.disable_buffering = False

    def connect(self):
        unix_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        unix_sock.settimeout(self.timeout)
        unix_sock.connect(self.unix_socket)
        self.sock = unix_sock

    def putheader(self, header, *values):
        super(UnixHTTPConnection, self).putheader(header, *values)
        # An Upgrade request means the stream will be hijacked; remember to
        # disable response buffering for it.
        if header == 'Connection' and 'Upgrade' in values:
            self.disable_buffering = True

    def response_class(self, sock, *args, **kwargs):
        if self.disable_buffering:
            kwargs['disable_buffering'] = True

        return UnixHTTPResponse(sock, *args, **kwargs)
+
+
class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
    """Pool of UNIX-socket backed HTTP connections."""

    def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
        super(UnixHTTPConnectionPool, self).__init__(
            'localhost', timeout=timeout, maxsize=maxsize
        )
        self.base_url = base_url
        self.socket_path = socket_path
        self.timeout = timeout

    def _new_conn(self):
        # Hand urllib3 a connection bound to our socket path.
        return UnixHTTPConnection(
            self.base_url, self.socket_path, self.timeout
        )
+
+
class UnixHTTPAdapter(BaseHTTPAdapter):
    """Transport adapter that routes requests over ``http+unix://`` URLs."""

    __attrs__ = HTTPAdapter.__attrs__ + ['pools',
                                         'socket_path',
                                         'timeout',
                                         'max_pool_size']

    def __init__(self, socket_url, timeout=60,
                 pool_connections=constants.DEFAULT_NUM_POOLS,
                 max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
        # Normalize "http+unix://var/run/x.sock" to an absolute path.
        socket_path = socket_url.replace('http+unix://', '')
        if not socket_path.startswith('/'):
            socket_path = '/' + socket_path
        self.socket_path = socket_path
        self.timeout = timeout
        self.max_pool_size = max_pool_size
        # LRU container of per-URL pools; evicted pools are closed eagerly.
        self.pools = RecentlyUsedContainer(
            pool_connections, dispose_func=lambda p: p.close()
        )
        super(UnixHTTPAdapter, self).__init__()

    def get_connection(self, url, proxies=None):
        """Return (creating and caching if needed) the pool for ``url``."""
        with self.pools.lock:
            cached = self.pools.get(url)
            if cached:
                return cached

            fresh = UnixHTTPConnectionPool(
                url, self.socket_path, self.timeout,
                maxsize=self.max_pool_size
            )
            self.pools[url] = fresh

        return fresh

    def request_url(self, request, proxies):
        # requests' select_proxy() errors out on URLs without a hostname,
        # and proxies are meaningless for UNIX sockets anyway, so hand back
        # the bare path URL directly.
        # See also: https://github.com/docker/docker-py/issues/811
        return request.path_url
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/types/daemon.py b/ansible_collections/community/docker/plugins/module_utils/_api/types/daemon.py
new file mode 100644
index 00000000..61964428
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/types/daemon.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import socket
+
+from .._import_helper import urllib3
+
+from ..errors import DockerException
+
+
class CancellableStream(object):
    """
    Stream wrapper for real-time events, logs, etc. from the server.

    Example:
        >>> events = client.events()
        >>> for event in events:
        ...     print(event)
        >>> # and cancel from another thread
        >>> events.close()
    """

    def __init__(self, stream, response):
        self._stream = stream
        self._response = response

    def __iter__(self):
        return self

    def __next__(self):
        # Transport-level failures are treated as the natural end of the
        # stream rather than bubbling up to the consumer.
        try:
            return next(self._stream)
        except urllib3.exceptions.ProtocolError:
            raise StopIteration
        except socket.error:
            raise StopIteration

    next = __next__

    def close(self):
        """
        Closes the event streaming.
        """

        if self._response.raw.closed:
            return

        # find the underlying socket object
        # based on api.client._get_raw_response_socket
        sock_fp = self._response.raw._fp.fp

        if hasattr(sock_fp, 'raw'):
            sock_raw = sock_fp.raw

            if hasattr(sock_raw, 'sock'):
                sock = sock_raw.sock
            elif hasattr(sock_raw, '_sock'):
                sock = sock_raw._sock
        elif hasattr(sock_fp, 'channel'):
            # We're working with a paramiko (SSH) channel, which doesn't
            # support cancelable streams with the current implementation
            raise DockerException(
                'Cancellable streams not supported for the SSH protocol'
            )
        else:
            sock = sock_fp._sock

        # Unwrap pyOpenSSL's socket wrapper when it is in play.
        if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
                sock, urllib3.contrib.pyopenssl.WrappedSocket):
            sock = sock.socket

        sock.shutdown(socket.SHUT_RDWR)
        sock.close()
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/build.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/build.py
new file mode 100644
index 00000000..85704f94
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/build.py
@@ -0,0 +1,305 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import io
+import os
+import random
+import re
+import tarfile
+import tempfile
+
+from ansible.module_utils.six import PY3
+
+from . import fnmatch
+from ..constants import IS_WINDOWS_PLATFORM, WINDOWS_LONGPATH_PREFIX
+
+
+_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
+
+
+def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
+ root = os.path.abspath(path)
+ exclude = exclude or []
+ dockerfile = dockerfile or (None, None)
+ extra_files = []
+ if dockerfile[1] is not None:
+ dockerignore_contents = '\n'.join(
+ (exclude or ['.dockerignore']) + [dockerfile[0]]
+ )
+ extra_files = [
+ ('.dockerignore', dockerignore_contents),
+ dockerfile,
+ ]
+ return create_archive(
+ files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
+ root=root, fileobj=fileobj, gzip=gzip, extra_files=extra_files
+ )
+
+
+def exclude_paths(root, patterns, dockerfile=None):
+ """
+ Given a root directory path and a list of .dockerignore patterns, return
+ an iterator of all paths (both regular files and directories) in the root
+ directory that do *not* match any of the patterns.
+
+ All paths returned are relative to the root.
+ """
+
+ if dockerfile is None:
+ dockerfile = 'Dockerfile'
+
+ patterns.append('!' + dockerfile)
+ pm = PatternMatcher(patterns)
+ return set(pm.walk(root))
+
+
+def build_file_list(root):
+ files = []
+ for dirname, dirnames, fnames in os.walk(root):
+ for filename in fnames + dirnames:
+ longpath = os.path.join(dirname, filename)
+ files.append(
+ longpath.replace(root, '', 1).lstrip('/')
+ )
+
+ return files
+
+
+def create_archive(root, files=None, fileobj=None, gzip=False,
+ extra_files=None):
+ extra_files = extra_files or []
+ if not fileobj:
+ fileobj = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
+ if files is None:
+ files = build_file_list(root)
+ extra_names = set(e[0] for e in extra_files)
+ for path in files:
+ if path in extra_names:
+ # Extra files override context files with the same name
+ continue
+ full_path = os.path.join(root, path)
+
+ i = t.gettarinfo(full_path, arcname=path)
+ if i is None:
+ # This happens when we encounter a socket file. We can safely
+ # ignore it and proceed.
+ continue
+
+ # Workaround https://bugs.python.org/issue32713
+ if i.mtime < 0 or i.mtime > 8**11 - 1:
+ i.mtime = int(i.mtime)
+
+ if IS_WINDOWS_PLATFORM:
+ # Windows doesn't keep track of the execute bit, so we make files
+ # and directories executable by default.
+ i.mode = i.mode & 0o755 | 0o111
+
+ if i.isfile():
+ try:
+ with open(full_path, 'rb') as f:
+ t.addfile(i, f)
+ except IOError:
+ raise IOError(
+ 'Can not read file in context: {0}'.format(full_path)
+ )
+ else:
+ # Directories, FIFOs, symlinks... don't need to be read.
+ t.addfile(i, None)
+
+ for name, contents in extra_files:
+ info = tarfile.TarInfo(name)
+ contents_encoded = contents.encode('utf-8')
+ info.size = len(contents_encoded)
+ t.addfile(info, io.BytesIO(contents_encoded))
+
+ t.close()
+ fileobj.seek(0)
+ return fileobj
+
+
+def mkbuildcontext(dockerfile):
+ f = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w', fileobj=f)
+ if isinstance(dockerfile, io.StringIO):
+ dfinfo = tarfile.TarInfo('Dockerfile')
+ if PY3:
+ raise TypeError('Please use io.BytesIO to create in-memory '
+ 'Dockerfiles with Python 3')
+ else:
+ dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
+ elif isinstance(dockerfile, io.BytesIO):
+ dfinfo = tarfile.TarInfo('Dockerfile')
+ dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
+ else:
+ dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
+ t.addfile(dfinfo, dockerfile)
+ t.close()
+ f.seek(0)
+ return f
+
+
+def split_path(p):
+ return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
+
+
+def normalize_slashes(p):
+ if IS_WINDOWS_PLATFORM:
+ return '/'.join(split_path(p))
+ return p
+
+
+def walk(root, patterns, default=True):
+ pm = PatternMatcher(patterns)
+ return pm.walk(root)
+
+
+# Heavily based on
+# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
+class PatternMatcher(object):
+ def __init__(self, patterns):
+ self.patterns = list(filter(
+ lambda p: p.dirs, [Pattern(p) for p in patterns]
+ ))
+ self.patterns.append(Pattern('!.dockerignore'))
+
+ def matches(self, filepath):
+ matched = False
+ parent_path = os.path.dirname(filepath)
+ parent_path_dirs = split_path(parent_path)
+
+ for pattern in self.patterns:
+ negative = pattern.exclusion
+ match = pattern.match(filepath)
+ if not match and parent_path != '':
+ if len(pattern.dirs) <= len(parent_path_dirs):
+ match = pattern.match(
+ os.path.sep.join(parent_path_dirs[:len(pattern.dirs)])
+ )
+
+ if match:
+ matched = not negative
+
+ return matched
+
+ def walk(self, root):
+ def rec_walk(current_dir):
+ for f in os.listdir(current_dir):
+ fpath = os.path.join(
+ os.path.relpath(current_dir, root), f
+ )
+ if fpath.startswith('.' + os.path.sep):
+ fpath = fpath[2:]
+ match = self.matches(fpath)
+ if not match:
+ yield fpath
+
+ cur = os.path.join(root, fpath)
+ if not os.path.isdir(cur) or os.path.islink(cur):
+ continue
+
+ if match:
+ # If we want to skip this file and it's a directory
+ # then we should first check to see if there's an
+ # excludes pattern (e.g. !dir/file) that starts with this
+ # dir. If so then we can't skip this dir.
+ skip = True
+
+ for pat in self.patterns:
+ if not pat.exclusion:
+ continue
+ if pat.cleaned_pattern.startswith(
+ normalize_slashes(fpath)):
+ skip = False
+ break
+ if skip:
+ continue
+ for sub in rec_walk(cur):
+ yield sub
+
+ return rec_walk(root)
+
+
+class Pattern(object):
+ def __init__(self, pattern_str):
+ self.exclusion = False
+ if pattern_str.startswith('!'):
+ self.exclusion = True
+ pattern_str = pattern_str[1:]
+
+ self.dirs = self.normalize(pattern_str)
+ self.cleaned_pattern = '/'.join(self.dirs)
+
+ @classmethod
+ def normalize(cls, p):
+
+ # Remove trailing spaces
+ p = p.strip()
+
+ # Leading and trailing slashes are not relevant. Yes,
+ # "foo.py/" must exclude the "foo.py" regular file. "."
+ # components are not relevant either, even if the whole
+ # pattern is only ".", as the Docker reference states: "For
+ # historical reasons, the pattern . is ignored."
+ # ".." component must be cleared with the potential previous
+ # component, regardless of whether it exists: "A preprocessing
+ # step [...] eliminates . and .. elements using Go's
+ # filepath.".
+ i = 0
+ split = split_path(p)
+ while i < len(split):
+ if split[i] == '..':
+ del split[i]
+ if i > 0:
+ del split[i - 1]
+ i -= 1
+ else:
+ i += 1
+ return split
+
+ def match(self, filepath):
+ return fnmatch.fnmatch(normalize_slashes(filepath), self.cleaned_pattern)
+
+
+def process_dockerfile(dockerfile, path):
+ if not dockerfile:
+ return (None, None)
+
+ abs_dockerfile = dockerfile
+ if not os.path.isabs(dockerfile):
+ abs_dockerfile = os.path.join(path, dockerfile)
+ if IS_WINDOWS_PLATFORM and path.startswith(
+ WINDOWS_LONGPATH_PREFIX):
+ abs_dockerfile = '{0}{1}'.format(
+ WINDOWS_LONGPATH_PREFIX,
+ os.path.normpath(
+ abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX):]
+ )
+ )
+ if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
+ os.path.relpath(abs_dockerfile, path).startswith('..')):
+ # Dockerfile not in context - read data to insert into tar later
+ with open(abs_dockerfile) as df:
+ return (
+ '.dockerfile.{random:x}'.format(random=random.getrandbits(160)),
+ df.read()
+ )
+
+ # Dockerfile is inside the context - return path relative to context root
+ if dockerfile == abs_dockerfile:
+ # Only calculate relpath if necessary to avoid errors
+ # on Windows client -> Linux Docker
+ # see https://github.com/docker/compose/issues/5969
+ dockerfile = os.path.relpath(abs_dockerfile, path)
+ return (dockerfile, None)
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/config.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/config.py
new file mode 100644
index 00000000..eed538b4
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/config.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import logging
+import os
+
+from ..constants import IS_WINDOWS_PLATFORM
+
+DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
+LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
+
+log = logging.getLogger(__name__)
+
+
+def find_config_file(config_path=None):
+ paths = list(filter(None, [
+ config_path, # 1
+ config_path_from_environment(), # 2
+ os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
+ os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
+ ]))
+
+ log.debug("Trying paths: %s", repr(paths))
+
+ for path in paths:
+ if os.path.exists(path):
+ log.debug("Found file at path: %s", path)
+ return path
+
+ log.debug("No config file found")
+
+ return None
+
+
+def config_path_from_environment():
+ config_dir = os.environ.get('DOCKER_CONFIG')
+ if not config_dir:
+ return None
+ return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
+
+
+def home_dir():
+    """
+    Get the user's home directory, using the same logic as the Docker Engine
+    client - use %USERPROFILE% on Windows, $HOME (via expanduser) on POSIX.
+    """
+ if IS_WINDOWS_PLATFORM:
+ return os.environ.get('USERPROFILE', '')
+ else:
+ return os.path.expanduser('~')
+
+
+def load_general_config(config_path=None):
+ config_file = find_config_file(config_path)
+
+ if not config_file:
+ return {}
+
+ try:
+ with open(config_file) as f:
+ return json.load(f)
+ except (IOError, ValueError) as e:
+ # In the case of a legacy `.dockercfg` file, we won't
+ # be able to load any JSON data.
+ log.debug(e)
+
+ log.debug("All parsing attempts failed - returning empty config")
+ return {}
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/decorators.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/decorators.py
new file mode 100644
index 00000000..ec2d258a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/decorators.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import functools
+
+from .. import errors
+from . import utils
+
+
+def check_resource(resource_name):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapped(self, resource_id=None, *args, **kwargs):
+ if resource_id is None and kwargs.get(resource_name):
+ resource_id = kwargs.pop(resource_name)
+ if isinstance(resource_id, dict):
+ resource_id = resource_id.get('Id', resource_id.get('ID'))
+ if not resource_id:
+ raise errors.NullResource(
+ 'Resource ID was not provided'
+ )
+ return f(self, resource_id, *args, **kwargs)
+ return wrapped
+ return decorator
+
+
+def minimum_version(version):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ if utils.version_lt(self._version, version):
+ raise errors.InvalidVersion(
+ '{0} is not available for version < {1}'.format(
+ f.__name__, version
+ )
+ )
+ return f(self, *args, **kwargs)
+ return wrapper
+ return decorator
+
+
+def update_headers(f):
+ def inner(self, *args, **kwargs):
+ if 'HttpHeaders' in self._general_configs:
+ if not kwargs.get('headers'):
+ kwargs['headers'] = self._general_configs['HttpHeaders']
+ else:
+ kwargs['headers'].update(self._general_configs['HttpHeaders'])
+ return f(self, *args, **kwargs)
+ return inner
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/fnmatch.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/fnmatch.py
new file mode 100644
index 00000000..f6e77a5f
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/fnmatch.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""Filename matching with shell patterns.
+
+fnmatch(FILENAME, PATTERN) matches according to the local convention.
+fnmatchcase(FILENAME, PATTERN) always takes case into account.
+
+The functions operate by translating the pattern into a regular
+expression. They cache the compiled regular expressions for speed.
+
+The function translate(PATTERN) returns a regular expression
+corresponding to PATTERN. (It does not compile it.)
+"""
+
+import re
+
+__all__ = ["fnmatch", "fnmatchcase", "translate"]
+
+_cache = {}
+_MAXCACHE = 100
+
+
+def _purge():
+ """Clear the pattern cache"""
+ _cache.clear()
+
+
+def fnmatch(name, pat):
+ """Test whether FILENAME matches PATTERN.
+
+ Patterns are Unix shell style:
+
+ * matches everything
+ ? matches any single character
+ [seq] matches any character in seq
+ [!seq] matches any char not in seq
+
+ An initial period in FILENAME is not special.
+ Both FILENAME and PATTERN are first case-normalized
+ if the operating system requires it.
+ If you don't want this, use fnmatchcase(FILENAME, PATTERN).
+ """
+
+ name = name.lower()
+ pat = pat.lower()
+ return fnmatchcase(name, pat)
+
+
+def fnmatchcase(name, pat):
+ """Test whether FILENAME matches PATTERN, including case.
+ This is a version of fnmatch() which doesn't case-normalize
+ its arguments.
+ """
+
+ try:
+ re_pat = _cache[pat]
+ except KeyError:
+ res = translate(pat)
+ if len(_cache) >= _MAXCACHE:
+ _cache.clear()
+ _cache[pat] = re_pat = re.compile(res)
+ return re_pat.match(name) is not None
+
+
+def translate(pat):
+ """Translate a shell PATTERN to a regular expression.
+
+ There is no way to quote meta-characters.
+ """
+ i, n = 0, len(pat)
+ res = '^'
+ while i < n:
+ c = pat[i]
+ i = i + 1
+ if c == '*':
+ if i < n and pat[i] == '*':
+ # is some flavor of "**"
+ i = i + 1
+ # Treat **/ as ** so eat the "/"
+ if i < n and pat[i] == '/':
+ i = i + 1
+ if i >= n:
+ # is "**EOF" - to align with .gitignore just accept all
+ res = res + '.*'
+ else:
+ # is "**"
+ # Note that this allows for any # of /'s (even 0) because
+ # the .* will eat everything, even /'s
+ res = res + '(.*/)?'
+ else:
+ # is "*" so map it to anything but "/"
+ res = res + '[^/]*'
+ elif c == '?':
+ # "?" is any char except "/"
+ res = res + '[^/]'
+ elif c == '[':
+ j = i
+ if j < n and pat[j] == '!':
+ j = j + 1
+ if j < n and pat[j] == ']':
+ j = j + 1
+ while j < n and pat[j] != ']':
+ j = j + 1
+ if j >= n:
+ res = res + '\\['
+ else:
+ stuff = pat[i:j].replace('\\', '\\\\')
+ i = j + 1
+ if stuff[0] == '!':
+ stuff = '^' + stuff[1:]
+ elif stuff[0] == '^':
+ stuff = '\\' + stuff
+ res = '%s[%s]' % (res, stuff)
+ else:
+ res = res + re.escape(c)
+
+ return res + '$'
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/json_stream.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/json_stream.py
new file mode 100644
index 00000000..f3a74bac
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/json_stream.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import json.decoder
+
+from ansible.module_utils.six import text_type
+
+from ..errors import StreamParseError
+
+
+json_decoder = json.JSONDecoder()
+
+
+def stream_as_text(stream):
+ """
+ Given a stream of bytes or text, if any of the items in the stream
+ are bytes convert them to text.
+ This function can be removed once we return text streams
+ instead of byte streams.
+ """
+ for data in stream:
+ if not isinstance(data, text_type):
+ data = data.decode('utf-8', 'replace')
+ yield data
+
+
+def json_splitter(buffer):
+ """Attempt to parse a json object from a buffer. If there is at least one
+ object, return it and the rest of the buffer, otherwise return None.
+ """
+ buffer = buffer.strip()
+ try:
+ obj, index = json_decoder.raw_decode(buffer)
+ rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
+ return obj, rest
+ except ValueError:
+ return None
+
+
+def json_stream(stream):
+ """Given a stream of text, return a stream of json objects.
+ This handles streams which are inconsistently buffered (some entries may
+ be newline delimited, and others are not).
+ """
+ return split_buffer(stream, json_splitter, json_decoder.decode)
+
+
+def line_splitter(buffer, separator=u'\n'):
+ index = buffer.find(text_type(separator))
+ if index == -1:
+ return None
+ return buffer[:index + 1], buffer[index + 1:]
+
+
+def split_buffer(stream, splitter=None, decoder=lambda a: a):
+ """Given a generator which yields strings and a splitter function,
+ joins all input, splits on the separator and yields each chunk.
+ Unlike string.split(), each chunk includes the trailing
+ separator, except for the last one if none was found on the end
+ of the input.
+ """
+ splitter = splitter or line_splitter
+ buffered = text_type('')
+
+ for data in stream_as_text(stream):
+ buffered += data
+ while True:
+ buffer_split = splitter(buffered)
+ if buffer_split is None:
+ break
+
+ item, buffered = buffer_split
+ yield item
+
+ if buffered:
+ try:
+ yield decoder(buffered)
+ except Exception as e:
+ raise StreamParseError(e)
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/ports.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/ports.py
new file mode 100644
index 00000000..194aaa7a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/ports.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+PORT_SPEC = re.compile(
+ "^" # Match full string
+ "(" # External part
+ r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
+ r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
+ ")?"
+ r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
+ "(?P<proto>/(udp|tcp|sctp))?" # Protocol
+ "$" # Match full string
+)
+
+
+def add_port_mapping(port_bindings, internal_port, external):
+ if internal_port in port_bindings:
+ port_bindings[internal_port].append(external)
+ else:
+ port_bindings[internal_port] = [external]
+
+
+def add_port(port_bindings, internal_port_range, external_range):
+ if external_range is None:
+ for internal_port in internal_port_range:
+ add_port_mapping(port_bindings, internal_port, None)
+ else:
+ ports = zip(internal_port_range, external_range)
+ for internal_port, external_port in ports:
+ add_port_mapping(port_bindings, internal_port, external_port)
+
+
+def build_port_bindings(ports):
+ port_bindings = {}
+ for port in ports:
+ internal_port_range, external_range = split_port(port)
+ add_port(port_bindings, internal_port_range, external_range)
+ return port_bindings
+
+
+def _raise_invalid_port(port):
+ raise ValueError('Invalid port "%s", should be '
+ '[[remote_ip:]remote_port[-remote_port]:]'
+ 'port[/protocol]' % port)
+
+
+def port_range(start, end, proto, randomly_available_port=False):
+ if not start:
+ return start
+ if not end:
+ return [start + proto]
+ if randomly_available_port:
+ return ['{0}-{1}'.format(start, end) + proto]
+ return [str(port) + proto for port in range(int(start), int(end) + 1)]
+
+
+def split_port(port):
+ if hasattr(port, 'legacy_repr'):
+ # This is the worst hack, but it prevents a bug in Compose 1.14.0
+ # https://github.com/docker/docker-py/issues/1668
+ # TODO: remove once fixed in Compose stable
+ port = port.legacy_repr()
+ port = str(port)
+ match = PORT_SPEC.match(port)
+ if match is None:
+ _raise_invalid_port(port)
+ parts = match.groupdict()
+
+ host = parts['host']
+ proto = parts['proto'] or ''
+ internal = port_range(parts['int'], parts['int_end'], proto)
+ external = port_range(
+ parts['ext'], parts['ext_end'], '', len(internal) == 1)
+
+ if host is None:
+ if external is not None and len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, external
+ else:
+ if not external:
+ external = [None] * len(internal)
+ elif len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, [(host, ext_port) for ext_port in external]
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/proxy.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/proxy.py
new file mode 100644
index 00000000..ed20ff53
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/proxy.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .utils import format_environment
+
+
+class ProxyConfig(dict):
+ '''
+ Hold the client's proxy configuration
+ '''
+ @property
+ def http(self):
+ return self.get('http')
+
+ @property
+ def https(self):
+ return self.get('https')
+
+ @property
+ def ftp(self):
+ return self.get('ftp')
+
+ @property
+ def no_proxy(self):
+ return self.get('no_proxy')
+
+ @staticmethod
+ def from_dict(config):
+ '''
+ Instantiate a new ProxyConfig from a dictionary that represents a
+ client configuration, as described in `the documentation`_.
+
+ .. _the documentation:
+ https://docs.docker.com/network/proxy/#configure-the-docker-client
+ '''
+ return ProxyConfig(
+ http=config.get('httpProxy'),
+ https=config.get('httpsProxy'),
+ ftp=config.get('ftpProxy'),
+ no_proxy=config.get('noProxy'),
+ )
+
+ def get_environment(self):
+ '''
+ Return a dictionary representing the environment variables used to
+ set the proxy settings.
+ '''
+ env = {}
+ if self.http:
+ env['http_proxy'] = env['HTTP_PROXY'] = self.http
+ if self.https:
+ env['https_proxy'] = env['HTTPS_PROXY'] = self.https
+ if self.ftp:
+ env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
+ if self.no_proxy:
+ env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
+ return env
+
+ def inject_proxy_environment(self, environment):
+ '''
+ Given a list of strings representing environment variables, prepend the
+ environment variables corresponding to the proxy settings.
+ '''
+ if not self:
+ return environment
+
+ proxy_env = format_environment(self.get_environment())
+ if not environment:
+ return proxy_env
+ # It is important to prepend our variables, because we want the
+ # variables defined in "environment" to take precedence.
+ return proxy_env + environment
+
+ def __str__(self):
+ return 'ProxyConfig(http={0}, https={1}, ftp={2}, no_proxy={3})'.format(
+ self.http, self.https, self.ftp, self.no_proxy)
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py
new file mode 100644
index 00000000..81c0c4f8
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import os
+import select
+import socket as pysocket
+import struct
+
+from ansible.module_utils.six import PY3, binary_type
+
+from ..transport.npipesocket import NpipeSocket
+
+
+STDOUT = 1
+STDERR = 2
+
+
+class SocketError(Exception):
+ pass
+
+
+# NpipeSockets have their own error types
+# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
+NPIPE_ENDED = 109
+
+
+def read(socket, n=4096):
+ """
+ Reads at most n bytes from socket
+ """
+
+ recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
+
+ if PY3 and not isinstance(socket, NpipeSocket):
+ select.select([socket], [], [])
+
+ try:
+ if hasattr(socket, 'recv'):
+ return socket.recv(n)
+ if PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
+ return socket.read(n)
+ return os.read(socket.fileno(), n)
+ except EnvironmentError as e:
+ if e.errno not in recoverable_errors:
+ raise
+ except Exception as e:
+ is_pipe_ended = (isinstance(socket, NpipeSocket) and
+ len(e.args) > 0 and
+ e.args[0] == NPIPE_ENDED)
+ if is_pipe_ended:
+ # npipes don't support duplex sockets, so we interpret
+ # a PIPE_ENDED error as a close operation (0-length read).
+ return 0
+ raise
+
+
+def read_exactly(socket, n):
+ """
+ Reads exactly n bytes from socket
+ Raises SocketError if there isn't enough data
+ """
+ data = binary_type()
+ while len(data) < n:
+ next_data = read(socket, n - len(data))
+ if not next_data:
+ raise SocketError("Unexpected EOF")
+ data += next_data
+ return data
+
+
+def next_frame_header(socket):
+ """
+ Returns the stream and size of the next frame of data waiting to be read
+ from socket, according to the protocol defined here:
+
+ https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
+ """
+ try:
+ data = read_exactly(socket, 8)
+ except SocketError:
+ return (-1, -1)
+
+ stream, actual = struct.unpack('>BxxxL', data)
+ return (stream, actual)
+
+
+def frames_iter(socket, tty):
+ """
+ Return a generator of frames read from socket. A frame is a tuple where
+ the first item is the stream number and the second item is a chunk of data.
+
+ If the tty setting is enabled, the streams are multiplexed into the stdout
+ stream.
+ """
+ if tty:
+ return ((STDOUT, frame) for frame in frames_iter_tty(socket))
+ else:
+ return frames_iter_no_tty(socket)
+
+
+def frames_iter_no_tty(socket):
+ """
+ Returns a generator of data read from the socket when the tty setting is
+ not enabled.
+ """
+ while True:
+ (stream, n) = next_frame_header(socket)
+ if n < 0:
+ break
+ while n > 0:
+ result = read(socket, n)
+ if result is None:
+ continue
+ data_length = len(result)
+ if data_length == 0:
+ # We have reached EOF
+ return
+ n -= data_length
+ yield (stream, result)
+
+
+def frames_iter_tty(socket):
+ """
+ Return a generator of data read from the socket when the tty setting is
+ enabled.
+ """
+ while True:
+ result = read(socket)
+ if len(result) == 0:
+ # We have reached EOF
+ return
+ yield result
+
+
+def consume_socket_output(frames, demux=False):
+ """
+ Iterate through frames read from the socket and return the result.
+
+ Args:
+
+ demux (bool):
+ If False, stdout and stderr are multiplexed, and the result is the
+ concatenation of all the frames. If True, the streams are
+ demultiplexed, and the result is a 2-tuple where each item is the
+ concatenation of frames belonging to the same stream.
+ """
+ if demux is False:
+ # If the streams are multiplexed, the generator returns strings, that
+ # we just need to concatenate.
+ return binary_type().join(frames)
+
+ # If the streams are demultiplexed, the generator yields tuples
+ # (stdout, stderr)
+ out = [None, None]
+ for frame in frames:
+ # It is guaranteed that for each frame, one and only one stream
+ # is not None.
+ if frame == (None, None):
+ raise AssertionError('frame must be (None, None), but got %s' % (frame, ))
+ if frame[0] is not None:
+ if out[0] is None:
+ out[0] = frame[0]
+ else:
+ out[0] += frame[0]
+ else:
+ if out[1] is None:
+ out[1] = frame[1]
+ else:
+ out[1] += frame[1]
+ return tuple(out)
+
+
+def demux_adaptor(stream_id, data):
+ """
+ Utility to demultiplex stdout and stderr when reading frames from the
+ socket.
+ """
+ if stream_id == STDOUT:
+ return (data, None)
+ elif stream_id == STDERR:
+ return (None, data)
+ else:
+ raise ValueError('{0} is not a valid stream'.format(stream_id))
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py
new file mode 100644
index 00000000..910b0dc3
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py
@@ -0,0 +1,524 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import collections
+import json
+import os
+import os.path
+import shlex
+import string
+from datetime import datetime
+from ansible_collections.community.docker.plugins.module_utils.version import StrictVersion
+
+from ansible.module_utils.six import PY2, PY3, binary_type, integer_types, iteritems, string_types, text_type
+
+from .. import errors
+from ..constants import DEFAULT_HTTP_HOST
+from ..constants import DEFAULT_UNIX_SOCKET
+from ..constants import DEFAULT_NPIPE
+from ..constants import BYTE_UNITS
+from ..tls import TLSConfig
+
+if PY2:
+ from urlparse import urlparse, urlunparse
+else:
+ from urllib.parse import urlparse, urlunparse
+
+
# 6-tuple matching what urlunparse() expects; 'url' is the path component.
URLComponents = collections.namedtuple(
    'URLComponents',
    'scheme netloc url params query fragment',
)
+
+
def create_ipam_pool(*args, **kwargs):
    # Removed API kept as a stub so stale callers fail with a clear message.
    raise errors.DeprecatedMethod(
        'utils.create_ipam_pool has been removed. Please use a '
        'docker.types.IPAMPool object instead.'
    )
+
+
def create_ipam_config(*args, **kwargs):
    # Removed API kept as a stub so stale callers fail with a clear message.
    raise errors.DeprecatedMethod(
        'utils.create_ipam_config has been removed. Please use a '
        'docker.types.IPAMConfig object instead.'
    )
+
+
def decode_json_header(header):
    """Decode a base64-encoded JSON HTTP header value into a Python object."""
    raw = base64.b64decode(header)
    if PY3:
        # b64decode returns bytes on Python 3; json.loads here expects text.
        raw = raw.decode('utf-8')
    return json.loads(raw)
+
+
def compare_version(v1, v2):
    """Compare docker versions

    NOTE: the sign convention is inverted relative to a conventional cmp():
    1 means v1 < v2, -1 means v1 > v2 (see the doctest below).

    >>> v1 = '1.9'
    >>> v2 = '1.10'
    >>> compare_version(v1, v2)
    1
    >>> compare_version(v2, v1)
    -1
    >>> compare_version(v2, v2)
    0
    """
    s1 = StrictVersion(v1)
    s2 = StrictVersion(v2)
    if s1 == s2:
        return 0
    elif s1 > s2:
        return -1
    else:
        return 1
+
+
def version_lt(v1, v2):
    # True when v1 < v2. compare_version() returns 1 for v1 < v2
    # (inverted cmp convention).
    return compare_version(v1, v2) > 0
+
+
def version_gte(v1, v2):
    # True when v1 >= v2, i.e. the negation of version_lt().
    return not version_lt(v1, v2)
+
+
+def _convert_port_binding(binding):
+ result = {'HostIp': '', 'HostPort': ''}
+ if isinstance(binding, tuple):
+ if len(binding) == 2:
+ result['HostPort'] = binding[1]
+ result['HostIp'] = binding[0]
+ elif isinstance(binding[0], string_types):
+ result['HostIp'] = binding[0]
+ else:
+ result['HostPort'] = binding[0]
+ elif isinstance(binding, dict):
+ if 'HostPort' in binding:
+ result['HostPort'] = binding['HostPort']
+ if 'HostIp' in binding:
+ result['HostIp'] = binding['HostIp']
+ else:
+ raise ValueError(binding)
+ else:
+ result['HostPort'] = binding
+
+ if result['HostPort'] is None:
+ result['HostPort'] = ''
+ else:
+ result['HostPort'] = str(result['HostPort'])
+
+ return result
+
+
def convert_port_bindings(port_bindings):
    """Convert a mapping of container ports to API-ready binding lists."""
    result = {}
    for port, binding in iteritems(port_bindings):
        port_key = str(port)
        if '/' not in port_key:
            # Default to TCP when no protocol was specified.
            port_key += '/tcp'
        bindings = binding if isinstance(binding, list) else [binding]
        result[port_key] = [_convert_port_binding(item) for item in bindings]
    return result
+
+
def convert_volume_binds(binds):
    """Convert a volume bind spec (list or dict) into ``host:container:mode`` strings."""
    if isinstance(binds, list):
        # Already in the string-list format the API expects.
        return binds

    result = []
    for host_path, config in binds.items():
        if isinstance(host_path, binary_type):
            host_path = host_path.decode('utf-8')

        if not isinstance(config, dict):
            # Plain value: treat it as the container path, default mode rw.
            container_path = config
            if isinstance(container_path, binary_type):
                container_path = container_path.decode('utf-8')
            result.append(
                text_type('{0}:{1}:rw').format(host_path, container_path)
            )
            continue

        if 'ro' in config and 'mode' in config:
            raise ValueError(
                'Binding cannot contain both "ro" and "mode": {0}'
                .format(repr(config))
            )

        container_path = config['bind']
        if isinstance(container_path, binary_type):
            container_path = container_path.decode('utf-8')

        if 'ro' in config:
            mode = 'ro' if config['ro'] else 'rw'
        elif 'mode' in config:
            mode = config['mode']
        else:
            mode = 'rw'

        result.append(
            text_type('{0}:{1}:{2}').format(host_path, container_path, mode)
        )
    return result
+
+
def convert_tmpfs_mounts(tmpfs):
    """Normalize a tmpfs spec (list or dict) into a ``{path: options}`` dict."""
    if isinstance(tmpfs, dict):
        # Already normalized.
        return tmpfs

    if not isinstance(tmpfs, list):
        raise ValueError(
            'Expected tmpfs value to be either a list or a dict, found: {0}'
            .format(type(tmpfs).__name__)
        )

    result = {}
    for mount in tmpfs:
        if not isinstance(mount, string_types):
            raise ValueError(
                "Expected item in tmpfs list to be a string, found: {0}"
                .format(type(mount).__name__)
            )
        # Split "path:options"; a bare path gets empty options.
        if ":" in mount:
            name, options = mount.split(":", 1)
        else:
            name, options = mount, ""
        result[name] = options
    return result
+
+
+def convert_service_networks(networks):
+ if not networks:
+ return networks
+ if not isinstance(networks, list):
+ raise TypeError('networks parameter must be a list.')
+
+ result = []
+ for n in networks:
+ if isinstance(n, string_types):
+ n = {'Target': n}
+ result.append(n)
+ return result
+
+
def parse_repository_tag(repo_name):
    """Split ``repo_name`` into ``(repository, tag_or_digest)``.

    A ``@digest`` suffix takes precedence over a ``:tag`` suffix, and a colon
    that belongs to a registry host (e.g. ``localhost:5000/img``) is not
    treated as a tag separator. Returns ``(repo_name, None)`` when neither
    form is present.
    """
    name, sep, digest = repo_name.rpartition('@')
    if sep:
        return name, digest
    name, sep, tag = repo_name.rpartition(':')
    if sep and '/' not in tag:
        return name, tag
    return repo_name, None
+
+
def parse_host(addr, is_win32=False, tls=False):
    """Normalize a Docker host address into a canonical URL string.

    Accepts tcp/unix/npipe/ssh URLs (plus the http/https/http+unix aliases)
    and raises errors.DockerException on malformed input. Empty input falls
    back to the platform default socket.
    """
    # Sensible defaults
    if not addr and is_win32:
        return DEFAULT_NPIPE
    if not addr or addr.strip() == 'unix://':
        return DEFAULT_UNIX_SOCKET

    addr = addr.strip()

    parsed_url = urlparse(addr)
    proto = parsed_url.scheme
    if not proto or any(x not in string.ascii_letters + '+' for x in proto):
        # Scheme-less addresses ("1.2.3.4:2375") confuse urlparse;
        # re-parse with an explicit tcp default.
        # https://bugs.python.org/issue754016
        parsed_url = urlparse('//' + addr, 'tcp')
        proto = 'tcp'

    if proto == 'fd':
        raise errors.DockerException('fd protocol is not implemented')

    # These protos are valid aliases for our library but not for the
    # official spec
    if proto == 'http' or proto == 'https':
        tls = proto == 'https'
        proto = 'tcp'
    elif proto == 'http+unix':
        proto = 'unix'

    if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
        raise errors.DockerException(
            "Invalid bind address protocol: {0}".format(addr)
        )

    if proto == 'tcp' and not parsed_url.netloc:
        # "tcp://" is exceptionally disallowed by convention;
        # omitting a hostname for other protocols is fine
        raise errors.DockerException(
            'Invalid bind address format: {0}'.format(addr)
        )

    if any([
        parsed_url.params, parsed_url.query, parsed_url.fragment,
        parsed_url.password
    ]):
        raise errors.DockerException(
            'Invalid bind address format: {0}'.format(addr)
        )

    if parsed_url.path and proto == 'ssh':
        raise errors.DockerException(
            'Invalid bind address format: no path allowed for this protocol:'
            ' {0}'.format(addr)
        )
    else:
        path = parsed_url.path
        if proto == 'unix' and parsed_url.hostname is not None:
            # For legacy reasons, we consider unix://path
            # to be valid and equivalent to unix:///path
            path = '/'.join((parsed_url.hostname, path))

    netloc = parsed_url.netloc
    if proto in ('tcp', 'ssh'):
        port = parsed_url.port or 0
        if port <= 0:
            if proto != 'ssh':
                raise errors.DockerException(
                    'Invalid bind address format: port is required:'
                    ' {0}'.format(addr)
                )
            # ssh falls back to the standard SSH port.
            port = 22
            netloc = '{0}:{1}'.format(parsed_url.netloc, port)

        if not parsed_url.hostname:
            netloc = '{0}:{1}'.format(DEFAULT_HTTP_HOST, port)

    # Rewrite schemes to fit library internals (requests adapters)
    if proto == 'tcp':
        proto = 'http{0}'.format('s' if tls else '')
    elif proto == 'unix':
        proto = 'http+unix'

    if proto in ('http+unix', 'npipe'):
        return "{0}://{1}".format(proto, path).rstrip('/')
    return urlunparse(URLComponents(
        scheme=proto,
        netloc=netloc,
        url=path,
        params='',
        query='',
        fragment='',
    )).rstrip('/')
+
+
def parse_devices(devices):
    """Expand device specs (dicts or ``host[:container[:permissions]]`` strings)."""
    device_list = []
    for device in devices:
        if isinstance(device, dict):
            # Already in API form; pass through untouched.
            device_list.append(device)
            continue
        if not isinstance(device, string_types):
            raise errors.DockerException(
                'Invalid device type {0}'.format(type(device))
            )
        parts = device.split(':')
        path_on_host = parts[0]
        # Missing container path defaults to the host path; missing
        # permissions default to read/write/mknod.
        path_in_container = parts[1] if len(parts) > 1 else path_on_host
        permissions = parts[2] if len(parts) > 2 else 'rwm'
        device_list.append({
            'PathOnHost': path_on_host,
            'PathInContainer': path_in_container,
            'CgroupPermissions': permissions
        })
    return device_list
+
+
def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
    """Build Docker client keyword arguments from DOCKER_* environment variables."""
    env = environment or os.environ
    host = env.get('DOCKER_HOST')

    # An empty DOCKER_CERT_PATH is treated the same as an unset one.
    cert_path = env.get('DOCKER_CERT_PATH') or None

    # DOCKER_TLS_VERIFY: the empty string counts as false; any other value,
    # or merely being set, counts as true.
    tls_verify_env = env.get('DOCKER_TLS_VERIFY')
    if tls_verify_env == '':
        tls_verify = False
    else:
        tls_verify = tls_verify_env is not None

    params = {}
    if host:
        params['base_url'] = host

    if not (cert_path or tls_verify):
        # TLS not enabled at all; nothing further to configure.
        return params

    if not cert_path:
        cert_path = os.path.join(os.path.expanduser('~'), '.docker')

    if not tls_verify and assert_hostname is None:
        # assert_hostname is a subset of TLS verification,
        # so if it's not set already then set it to false.
        assert_hostname = False

    params['tls'] = TLSConfig(
        client_cert=(os.path.join(cert_path, 'cert.pem'),
                     os.path.join(cert_path, 'key.pem')),
        ca_cert=os.path.join(cert_path, 'ca.pem'),
        verify=tls_verify,
        ssl_version=ssl_version,
        assert_hostname=assert_hostname,
    )
    return params
+
+
def convert_filters(filters):
    """JSON-encode a filters mapping for the API.

    Booleans become the strings 'true'/'false'; every value becomes a list
    of strings.
    """
    converted = {}
    for key, value in iteritems(filters):
        if isinstance(value, bool):
            value = 'true' if value else 'false'
        if not isinstance(value, list):
            value = [value, ]
        converted[key] = [
            item if isinstance(item, string_types) else str(item)
            for item in value
        ]
    return json.dumps(converted)
+
+
def datetime_to_timestamp(dt):
    """Convert a UTC datetime to a Unix timestamp (whole seconds; sub-second
    precision is discarded)."""
    elapsed = dt - datetime.utcfromtimestamp(0)
    return elapsed.days * 24 * 3600 + elapsed.seconds
+
+
def parse_bytes(s):
    """Parse a human-readable size ('512m', '1g', '100kb', plain digits or a
    number) into an integer byte count; raises errors.DockerException on a
    malformed string."""
    # Numbers pass through untouched.
    if isinstance(s, integer_types + (float,)):
        return s
    if len(s) == 0:
        return 0

    # Strip a trailing 'b'/'B' from two-letter units like 'kb' or 'MB',
    # leaving just the magnitude letter.
    if s[-2:-1].isalpha() and s[-1].isalpha():
        if s[-1] == "b" or s[-1] == "B":
            s = s[:-1]
    units = BYTE_UNITS
    suffix = s[-1].lower()

    # Check if the variable is a string representation of an int
    # without a units part. Assuming that the units are bytes.
    if suffix.isdigit():
        digits_part = s
        suffix = 'b'
    else:
        digits_part = s[:-1]

    if suffix in units.keys() or suffix.isdigit():
        try:
            digits = float(digits_part)
        except ValueError:
            raise errors.DockerException(
                'Failed converting the string value for memory ({0}) to'
                ' an integer.'.format(digits_part)
            )

        # Reconvert to long for the final result
        s = int(digits * units[suffix])
    else:
        raise errors.DockerException(
            'The specified value for memory ({0}) should specify the'
            ' units. The postfix should be one of the `b` `k` `m` `g`'
            ' characters'.format(s)
        )

    return s
+
+
def normalize_links(links):
    """Render (name, alias) pairs as sorted 'name:alias' strings.

    A falsy alias yields just the name; a dict input is treated as its
    item pairs.
    """
    if isinstance(links, dict):
        links = iteritems(links)

    rendered = []
    for name, alias in sorted(links):
        rendered.append('{0}:{1}'.format(name, alias) if alias else name)
    return rendered
+
+
def parse_env_file(env_file):
    """
    Reads a line-separated environment file.
    The format of each line should be "key=value".
    """
    environment = {}

    with open(env_file, 'r') as f:
        for line in f:
            # NOTE: the comment check happens before stripping, so only lines
            # whose very first character is '#' are skipped.
            if line[0] == '#':
                continue

            line = line.strip()
            if not line:
                continue

            parts = line.split('=', 1)
            if len(parts) != 2:
                raise errors.DockerException(
                    'Invalid line in environment file {0}:\n{1}'.format(
                        env_file, line))
            key, value = parts
            environment[key] = value

    return environment
+
+
def split_command(command):
    # shlex.split on Python 2 mishandles unicode input, so encode to bytes
    # first; Python 3's shlex handles text natively.
    if PY2 and not isinstance(command, binary_type):
        command = command.encode('utf-8')
    return shlex.split(command)
+
+
def format_environment(environment):
    """Render an environment mapping as ``['KEY=value', ...]``.

    A None value yields just the bare key (the daemon then inherits the
    variable from its own environment).
    """
    def render(key, value):
        if value is None:
            return key
        if isinstance(value, binary_type):
            value = value.decode('utf-8')
        return u'{key}={value}'.format(key=key, value=value)

    return [render(*item) for item in iteritems(environment)]
+
+
def format_extra_hosts(extra_hosts, task=False):
    """Render extra hosts entries, sorted by hostname.

    Swarm tasks use the 'IP hostname' form; plain containers use
    'hostname:IP'.
    """
    pairs = sorted(iteritems(extra_hosts))
    if task:
        # Use format dictated by Swarm API if container is part of a task
        return ['{0} {1}'.format(value, key) for key, value in pairs]
    return ['{0}:{1}'.format(key, value) for key, value in pairs]
+
+
def create_host_config(self, *args, **kwargs):
    # Removed API kept as a stub so stale callers fail with a clear message.
    # NOTE(review): the 'self' parameter looks vestigial for a module-level
    # function — presumably kept for signature compatibility; confirm callers.
    raise errors.DeprecatedMethod(
        'utils.create_host_config has been removed. Please use a '
        'docker.types.HostConfig object instead.'
    )
diff --git a/ansible_collections/community/docker/plugins/module_utils/_scramble.py b/ansible_collections/community/docker/plugins/module_utils/_scramble.py
new file mode 100644
index 00000000..10325731
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_scramble.py
@@ -0,0 +1,56 @@
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import random
+
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.six import PY2
+
+
def generate_insecure_key():
    '''Do NOT use this for cryptographic purposes!'''
    while True:
        # Generate a one-byte key. Right now the functions below do not use
        # more than one byte, so this is sufficient.
        if PY2:
            key = chr(random.randrange(256))
        else:
            key = bytes([random.randrange(256)])
        # Reject zero: XOR with zero would leave the value unscrambled.
        if key != b'\x00':
            return key
+
+
def scramble(value, key):
    '''Do NOT use this for cryptographic purposes!'''
    if len(key) < 1:
        raise ValueError('Key must be at least one byte')
    data = to_bytes(value)
    if PY2:
        mask = ord(key[0])
        data = b''.join([chr(mask ^ ord(ch)) for ch in data])
    else:
        mask = key[0]
        data = bytes([mask ^ ch for ch in data])
    # The '=S=' prefix marks the value as scrambled for unscramble().
    return '=S=' + to_native(base64.b64encode(data))
+
+
def unscramble(value, key):
    '''Do NOT use this for cryptographic purposes!'''
    if len(key) < 1:
        raise ValueError('Key must be at least one byte')
    if not value.startswith(u'=S='):
        raise ValueError('Value does not start with indicator')
    data = base64.b64decode(value[3:])
    if PY2:
        mask = ord(key[0])
        data = b''.join([chr(mask ^ ord(ch)) for ch in data])
    else:
        mask = key[0]
        data = bytes([mask ^ ch for ch in data])
    return to_text(data)
diff --git a/ansible_collections/community/docker/plugins/module_utils/common.py b/ansible_collections/community/docker/plugins/module_utils/common.py
new file mode 100644
index 00000000..e6a06ed6
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/common.py
@@ -0,0 +1,693 @@
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import abc
+import os
+import platform
+import re
+import sys
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
# Availability flags for the Docker SDK for Python; refined by the import
# attempts below.
HAS_DOCKER_PY = True
HAS_DOCKER_PY_2 = False
HAS_DOCKER_PY_3 = False
HAS_DOCKER_ERROR = None
HAS_DOCKER_TRACEBACK = None
+
+try:
+ from requests.exceptions import SSLError
+ from docker import __version__ as docker_version
+ from docker.errors import APIError, NotFound, TLSParameterError
+ from docker.tls import TLSConfig
+ from docker import auth
+
+ if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
+ HAS_DOCKER_PY_3 = True
+ from docker import APIClient as Client
+ elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ HAS_DOCKER_PY_2 = True
+ from docker import APIClient as Client
+ else:
+ from docker import Client
+
+except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ HAS_DOCKER_TRACEBACK = traceback.format_exc()
+ HAS_DOCKER_PY = False
+
+
+# The next two imports ``docker.models`` and ``docker.ssladapter`` are used
+# to ensure the user does not have both ``docker`` and ``docker-py`` modules
+# installed, as they utilize the same namespace are are incompatible
+try:
+ # docker (Docker SDK for Python >= 2.0.0)
+ import docker.models # noqa: F401, pylint: disable=unused-import
+ HAS_DOCKER_MODELS = True
+except ImportError:
+ HAS_DOCKER_MODELS = False
+
+try:
+ # docker-py (Docker SDK for Python < 2.0.0)
+ import docker.ssladapter # noqa: F401, pylint: disable=unused-import
+ HAS_DOCKER_SSLADAPTER = True
+except ImportError:
+ HAS_DOCKER_SSLADAPTER = False
+
+
+try:
+ from requests.exceptions import RequestException # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ # Either Docker SDK for Python is no longer using requests, or Docker SDK for Python isn't around either,
+ # or Docker SDK for Python's dependency requests is missing. In any case, define an exception
+ # class RequestException so that our code doesn't break.
+ class RequestException(Exception):
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.util import ( # noqa: F401, pylint: disable=unused-import
+ DEFAULT_DOCKER_HOST,
+ DEFAULT_TLS,
+ DEFAULT_TLS_VERIFY,
+ DEFAULT_TLS_HOSTNAME, # TODO: remove
+ DEFAULT_TIMEOUT_SECONDS,
+ DOCKER_COMMON_ARGS,
+ DOCKER_COMMON_ARGS_VARS, # TODO: remove
+ DOCKER_MUTUALLY_EXCLUSIVE,
+ DOCKER_REQUIRED_TOGETHER,
+ DEFAULT_DOCKER_REGISTRY, # TODO: remove
+ BYTE_SUFFIXES, # TODO: remove
+ is_image_name_id, # TODO: remove
+ is_valid_tag, # TODO: remove
+ sanitize_result,
+ DockerBaseClass, # TODO: remove
+ update_tls_hostname,
+ compare_dict_allow_more_present, # TODO: remove
+ compare_generic, # TODO: remove
+ DifferenceTracker, # TODO: remove
+ clean_dict_booleans_for_docker_api, # TODO: remove
+ convert_duration_to_nanosecond, # TODO: remove
+ parse_healthcheck, # TODO: remove
+ omit_none_from_dict, # TODO: remove
+)
+
+
# Lowest Docker SDK for Python version supported by default (can be raised
# per-module via the min_docker_version argument).
MIN_DOCKER_VERSION = "1.8.0"
+
+
if not HAS_DOCKER_PY:
    docker_version = None

    # No Docker SDK for Python. Create a place holder client to allow
    # instantiation of AnsibleModule and proper error handing
    class Client(object):  # noqa: F811
        # Accepts arbitrary kwargs so client construction survives long
        # enough to report the missing-library error.
        def __init__(self, **kwargs):
            pass

    # Placeholder exception types matching the names imported from docker.
    class APIError(Exception):  # noqa: F811
        pass

    class NotFound(Exception):  # noqa: F811
        pass
+
+
def _get_tls_config(fail_function, **kwargs):
    """Build a TLSConfig from kwargs, reporting parameter errors via fail_function."""
    try:
        return TLSConfig(**kwargs)
    except TLSParameterError as exc:
        fail_function("TLS config error: %s" % exc)
+
+
def is_using_tls(auth):
    """Return a truthy value when either tls_verify or tls is enabled in ``auth``."""
    if auth['tls_verify']:
        return auth['tls_verify']
    return auth['tls']
+
+
def get_connect_params(auth, fail_function):
    """Translate an auth_params dict into keyword arguments for the Docker client.

    NOTE: mutates auth['docker_host'] in place when TLS is enabled.
    """
    if is_using_tls(auth):
        # The SDK expects an https:// scheme when TLS is in play.
        auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')

    result = dict(
        base_url=auth['docker_host'],
        version=auth['api_version'],
        timeout=auth['timeout'],
    )

    if auth['tls_verify']:
        # TLS with verification
        tls_config = dict(
            verify=True,
            assert_hostname=auth['tls_hostname'],
            ssl_version=auth['ssl_version'],
            fail_function=fail_function,
        )
        if auth['cert_path'] and auth['key_path']:
            tls_config['client_cert'] = (auth['cert_path'], auth['key_path'])
        if auth['cacert_path']:
            tls_config['ca_cert'] = auth['cacert_path']
        result['tls'] = _get_tls_config(**tls_config)
    elif auth['tls']:
        # TLS without verification
        tls_config = dict(
            verify=False,
            ssl_version=auth['ssl_version'],
            fail_function=fail_function,
        )
        if auth['cert_path'] and auth['key_path']:
            tls_config['client_cert'] = (auth['cert_path'], auth['key_path'])
        result['tls'] = _get_tls_config(**tls_config)

    if auth.get('use_ssh_client'):
        # ssh:// transport through the SDK requires 4.4.0+.
        if LooseVersion(docker_version) < LooseVersion('4.4.0'):
            fail_function("use_ssh_client=True requires Docker SDK for Python 4.4.0 or newer")
        result['use_ssh_client'] = True

    # No TLS
    return result
+
+
# Remediation hints appended to version-mismatch failure messages.
DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
DOCKERPYUPGRADE_RECOMMEND_DOCKER = "Use `pip install --upgrade docker-py` to upgrade."
+
+
+class AnsibleDockerClientBase(Client):
+ def __init__(self, min_docker_version=None, min_docker_api_version=None):
+ if min_docker_version is None:
+ min_docker_version = MIN_DOCKER_VERSION
+ NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
+
+ self.docker_py_version = LooseVersion(docker_version)
+
+ if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
+ self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
+ "SDK for Python) installed together as they use the same namespace and cause a corrupt "
+ "installation. Please uninstall both packages, and re-install only the docker-py or docker "
+ "python module (for %s's Python %s). It is recommended to install the docker module. Please "
+ "note that simply uninstalling one of the modules can leave the other module in a broken "
+ "state." % (platform.node(), sys.executable))
+
+ if not HAS_DOCKER_PY:
+ msg = missing_required_lib("Docker SDK for Python: docker>=5.0.0 (Python >= 3.6) or "
+ "docker<5.0.0 (Python 2.7)")
+ msg = msg + ", for example via `pip install docker` (Python >= 3.6) or " \
+ + "`pip install docker==4.4.4` (Python 2.7). The error was: %s"
+ self.fail(msg % HAS_DOCKER_ERROR, exception=HAS_DOCKER_TRACEBACK)
+
+ if self.docker_py_version < LooseVersion(min_docker_version):
+ msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
+ if not NEEDS_DOCKER_PY2:
+ # The minimal required version is < 2.0 (and the current version as well).
+ # Advertise docker (instead of docker-py).
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif docker_version < LooseVersion('2.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
+
+ self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
+
+ try:
+ super(AnsibleDockerClientBase, self).__init__(**self._connect_params)
+ self.docker_api_version_str = self.api_version
+ except APIError as exc:
+ self.fail("Docker API error: %s" % exc)
+ except Exception as exc:
+ self.fail("Error connecting: %s" % exc)
+
+ self.docker_api_version = LooseVersion(self.docker_api_version_str)
+ min_docker_api_version = min_docker_api_version or '1.25'
+ if self.docker_api_version < LooseVersion(min_docker_api_version):
+ self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+ @abc.abstractmethod
+ def fail(self, msg, **kwargs):
+ pass
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ pass
+
+ @staticmethod
+ def _get_value(param_name, param_value, env_variable, default_value, type='str'):
+ if param_value is not None:
+ # take module parameter value
+ if type == 'bool':
+ if param_value in BOOLEANS_TRUE:
+ return True
+ if param_value in BOOLEANS_FALSE:
+ return False
+ return bool(param_value)
+ if type == 'int':
+ return int(param_value)
+ return param_value
+
+ if env_variable is not None:
+ env_value = os.environ.get(env_variable)
+ if env_value is not None:
+ # take the env variable value
+ if param_name == 'cert_path':
+ return os.path.join(env_value, 'cert.pem')
+ if param_name == 'cacert_path':
+ return os.path.join(env_value, 'ca.pem')
+ if param_name == 'key_path':
+ return os.path.join(env_value, 'key.pem')
+ if type == 'bool':
+ if env_value in BOOLEANS_TRUE:
+ return True
+ if env_value in BOOLEANS_FALSE:
+ return False
+ return bool(env_value)
+ if type == 'int':
+ return int(env_value)
+ return env_value
+
+ # take the default
+ return default_value
+
+ @abc.abstractmethod
+ def _get_params(self):
+ pass
+
+ @property
+ def auth_params(self):
+ # Get authentication credentials.
+ # Precedence: module parameters-> environment variables-> defaults.
+
+ self.log('Getting credentials')
+
+ client_params = self._get_params()
+
+ params = dict()
+ for key in DOCKER_COMMON_ARGS:
+ params[key] = client_params.get(key)
+
+ result = dict(
+ docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+ DEFAULT_DOCKER_HOST, type='str'),
+ tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+ 'DOCKER_TLS_HOSTNAME', None, type='str'),
+ api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+ 'auto', type='str'),
+ cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None, type='str'),
+ cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None, type='str'),
+ key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None, type='str'),
+ ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None, type='str'),
+ tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS, type='bool'),
+ tls_verify=self._get_value('tls_verfy', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+ DEFAULT_TLS_VERIFY, type='bool'),
+ timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+ DEFAULT_TIMEOUT_SECONDS, type='int'),
+ use_ssh_client=self._get_value('use_ssh_client', params['use_ssh_client'], None, False, type='bool'),
+ )
+
+ update_tls_hostname(result)
+
+ return result
+
+ def _handle_ssl_error(self, error):
+ match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+ if match:
+ self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
+ "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+ "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+ "setting the `tls` parameter to true."
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
+ self.fail("SSL Exception: %s" % (error))
+
+ def get_container_by_id(self, container_id):
+ try:
+ self.log("Inspecting container Id %s" % container_id)
+ result = self.inspect_container(container=container_id)
+ self.log("Completed container inspection")
+ return result
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting container: %s" % exc)
+
+ def get_container(self, name=None):
+ '''
+ Lookup a container and return the inspection results.
+ '''
+ if name is None:
+ return None
+
+ search_name = name
+ if not name.startswith('/'):
+ search_name = '/' + name
+
+ result = None
+ try:
+ for container in self.containers(all=True):
+ self.log("testing container: %s" % (container['Names']))
+ if isinstance(container['Names'], list) and search_name in container['Names']:
+ result = container
+ break
+ if container['Id'].startswith(name):
+ result = container
+ break
+ if container['Id'] == name:
+ result = container
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving container list: %s" % exc)
+
+ if result is None:
+ return None
+
+ return self.get_container_by_id(result['Id'])
+
+ def get_network(self, name=None, network_id=None):
+ '''
+ Lookup a network and return the inspection results.
+ '''
+ if name is None and network_id is None:
+ return None
+
+ result = None
+
+ if network_id is None:
+ try:
+ for network in self.networks():
+ self.log("testing network: %s" % (network['Name']))
+ if name == network['Name']:
+ result = network
+ break
+ if network['Id'].startswith(name):
+ result = network
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving network list: %s" % exc)
+
+ if result is not None:
+ network_id = result['Id']
+
+ if network_id is not None:
+ try:
+ self.log("Inspecting network Id %s" % network_id)
+ result = self.inspect_network(network_id)
+ self.log("Completed network inspection")
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting network: %s" % exc)
+
+ return result
+
+ def find_image(self, name, tag):
+ '''
+ Lookup an image (by name and tag) and return the inspection results.
+ '''
+ if not name:
+ return None
+
+ self.log("Find image %s:%s" % (name, tag))
+ images = self._image_lookup(name, tag)
+ if not images:
+ # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
+ registry, repo_name = auth.resolve_repository_name(name)
+ if registry == 'docker.io':
+ # If docker.io is explicitly there in name, the image
+ # isn't found in some cases (#41509)
+ self.log("Check for docker.io image: %s" % repo_name)
+ images = self._image_lookup(repo_name, tag)
+ if not images and repo_name.startswith('library/'):
+ # Sometimes library/xxx images are not found
+ lookup = repo_name[len('library/'):]
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images:
+ # Last case for some Docker versions: if docker.io wasn't there,
+ # it can be that the image wasn't found either
+ # (https://github.com/ansible/ansible/pull/15586)
+ lookup = "%s/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images and '/' not in repo_name:
+ # This seems to be happening with podman-docker
+ # (https://github.com/ansible-collections/community.docker/issues/291)
+ lookup = "%s/library/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+
+ if len(images) > 1:
+ self.fail("Registry returned more than one result for %s:%s" % (name, tag))
+
+ if len(images) == 1:
+ try:
+ inspection = self.inspect_image(images[0]['Id'])
+ except NotFound:
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
+ return inspection
+
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+
+ def find_image_by_id(self, image_id, accept_missing_image=False):
+ '''
+ Lookup an image (by ID) and return the inspection results.
+ '''
+ if not image_id:
+ return None
+
+ self.log("Find image %s (by ID)" % image_id)
+ try:
+ inspection = self.inspect_image(image_id)
+ except NotFound as exc:
+ if not accept_missing_image:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ self.log("Image %s not found." % image_id)
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ return inspection
+
+ def _image_lookup(self, name, tag):
+ '''
+ Including a tag in the name parameter sent to the Docker SDK for Python images method
+ does not work consistently. Instead, get the result set for name and manually check
+ if the tag exists.
+
+ :param name: repository name (without tag).
+ :param tag: tag or digest to match; if falsy, all results for ``name`` are returned.
+ :return: list of image dicts — at most one entry when ``tag`` is given.
+ '''
+ try:
+ response = self.images(name=name)
+ except Exception as exc:
+ self.fail("Error searching for image %s - %s" % (name, str(exc)))
+ images = response
+ if tag:
+ # Match either a plain tag ("name:tag") or a digest reference ("name@tag"),
+ # so callers may pass a digest in the tag position.
+ lookup = "%s:%s" % (name, tag)
+ lookup_digest = "%s@%s" % (name, tag)
+ images = []
+ for image in response:
+ tags = image.get('RepoTags')
+ digests = image.get('RepoDigests')
+ if (tags and lookup in tags) or (digests and lookup_digest in digests):
+ images = [image]
+ break
+ return images
+
+ def pull_image(self, name, tag="latest", platform=None):
+ '''
+ Pull an image
+
+ :param name: repository name to pull.
+ :param tag: tag to pull (default ``"latest"``).
+ :param platform: optional platform string; only forwarded when not ``None``
+ (older SDKs do not accept the keyword).
+ :return: tuple ``(new_image, unchanged)`` where ``new_image`` is the
+ post-pull inspection result and ``unchanged`` is ``True`` when the
+ pull did not change the locally tagged image.
+ '''
+ kwargs = dict(
+ tag=tag,
+ stream=True,
+ decode=True,
+ )
+ if platform is not None:
+ kwargs['platform'] = platform
+ self.log("Pulling image %s:%s" % (name, tag))
+ # Remember the pre-pull state so the caller can detect whether anything changed.
+ old_tag = self.find_image(name, tag)
+ try:
+ # The SDK streams progress lines; errors are reported inline rather than raised.
+ for line in self.pull(name, **kwargs):
+ self.log(line, pretty_print=True)
+ if line.get('error'):
+ if line.get('errorDetail'):
+ error_detail = line.get('errorDetail')
+ self.fail("Error pulling %s - code: %s message: %s" % (name,
+ error_detail.get('code'),
+ error_detail.get('message')))
+ else:
+ self.fail("Error pulling %s - %s" % (name, line.get('error')))
+ except Exception as exc:
+ self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
+
+ new_tag = self.find_image(name, tag)
+
+ return new_tag, old_tag == new_tag
+
+ def inspect_distribution(self, image, **kwargs):
+ '''
+ Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
+ since prior versions did not support accessing private repositories.
+
+ :param image: image reference to inspect.
+ :return: parsed JSON result of ``/distribution/<image>/json``.
+ '''
+ if self.docker_py_version < LooseVersion('4.0.0'):
+ registry = auth.resolve_repository_name(image)[0]
+ header = auth.get_config_header(self, registry)
+ if header:
+ # Pass registry credentials explicitly; old SDKs cannot do this themselves.
+ return self._result(self._get(
+ self._url('/distribution/{0}/json', image),
+ headers={'X-Registry-Auth': header}
+ ), json=True)
+ # Recent SDKs (>= 4.0.0), or no credentials available: defer to the SDK.
+ return super(AnsibleDockerClientBase, self).inspect_distribution(image, **kwargs)
+
+
+class AnsibleDockerClient(AnsibleDockerClientBase):
+ '''
+ Docker client bound to an AnsibleModule: merges the common Docker argument
+ spec with the module's own, instantiates the module, and routes failures,
+ deprecations and warnings through it.
+ '''
+
+ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
+ required_together=None, required_if=None, required_one_of=None, required_by=None,
+ min_docker_version=None, min_docker_api_version=None, option_minimal_versions=None,
+ option_minimal_versions_ignore_params=None, fail_results=None):
+
+ # Modules can put information in here which will always be returned
+ # in case client.fail() is called.
+ self.fail_results = fail_results or {}
+
+ # Combine the shared Docker connection options with module-specific ones.
+ merged_arg_spec = dict()
+ merged_arg_spec.update(DOCKER_COMMON_ARGS)
+ if argument_spec:
+ merged_arg_spec.update(argument_spec)
+ self.arg_spec = merged_arg_spec
+
+ mutually_exclusive_params = []
+ mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
+ if mutually_exclusive:
+ mutually_exclusive_params += mutually_exclusive
+
+ required_together_params = []
+ required_together_params += DOCKER_REQUIRED_TOGETHER
+ if required_together:
+ required_together_params += required_together
+
+ self.module = AnsibleModule(
+ argument_spec=merged_arg_spec,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive_params,
+ required_together=required_together_params,
+ required_if=required_if,
+ required_one_of=required_one_of,
+ required_by=required_by or {},
+ )
+
+ self.debug = self.module.params.get('debug')
+ self.check_mode = self.module.check_mode
+
+ # The base class connects to the daemon and enforces minimum versions;
+ # it may call self.fail(), so self.module must exist before this point.
+ super(AnsibleDockerClient, self).__init__(
+ min_docker_version=min_docker_version,
+ min_docker_api_version=min_docker_api_version)
+
+ if option_minimal_versions is not None:
+ self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
+
+ def fail(self, msg, **kwargs):
+ # Merge extra result data into fail_results so it reaches the user on failure.
+ self.fail_results.update(kwargs)
+ self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ self.module.deprecate(msg, version=version, date=date, collection_name=collection_name)
+
+ def _get_params(self):
+ # Hook used by the base class to read connection parameters.
+ return self.module.params
+
+ def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
+ '''
+ Record, per module option, whether the installed Docker SDK / daemon API
+ support it, and fail if an unsupported option is actually used.
+ '''
+ self.option_minimal_versions = dict()
+ for option in self.module.argument_spec:
+ if ignore_params is not None:
+ if option in ignore_params:
+ continue
+ self.option_minimal_versions[option] = dict()
+ self.option_minimal_versions.update(option_minimal_versions)
+
+ for option, data in self.option_minimal_versions.items():
+ # Test whether option is supported, and store result
+ support_docker_py = True
+ support_docker_api = True
+ if 'docker_py_version' in data:
+ support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
+ if 'docker_api_version' in data:
+ support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
+ data['supported'] = support_docker_py and support_docker_api
+ # Fail if option is not supported but used
+ if not data['supported']:
+ # Test whether option is specified
+ if 'detect_usage' in data:
+ used = data['detect_usage'](self)
+ else:
+ used = self.module.params.get(option) is not None
+ if used and 'default' in self.module.argument_spec[option]:
+ # An option equal to its declared default does not count as "used".
+ used = self.module.params[option] != self.module.argument_spec[option]['default']
+ if used:
+ # If the option is used, compose error message.
+ if 'usage_msg' in data:
+ usg = data['usage_msg']
+ else:
+ usg = 'set %s option' % (option, )
+ if not support_docker_api:
+ msg = 'Docker API version is %s. Minimum version required is %s to %s.'
+ msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
+ elif not support_docker_py:
+ # Pick the upgrade recommendation matching whether the legacy
+ # docker-py or the modern docker package is involved.
+ msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
+ if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif self.docker_py_version < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
+ else:
+ # should not happen
+ msg = 'Cannot %s with your configuration.' % (usg, )
+ self.fail(msg)
+
+ def report_warnings(self, result, warnings_key=None):
+ '''
+ Checks result of client operation for warnings, and if present, outputs them.
+
+ warnings_key should be a list of keys used to crawl the result dictionary.
+ For example, if warnings_key == ['a', 'b'], the function will consider
+ result['a']['b'] if these keys exist. If the result is a non-empty string, it
+ will be reported as a warning. If the result is a list, every entry will be
+ reported as a warning.
+
+ In most cases (if warnings are returned at all), warnings_key should be
+ ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
+ '''
+ if warnings_key is None:
+ warnings_key = ['Warnings']
+ for key in warnings_key:
+ if not isinstance(result, Mapping):
+ return
+ result = result.get(key)
+ if isinstance(result, Sequence):
+ for warning in result:
+ self.module.warn('Docker warning: {0}'.format(warning))
+ elif isinstance(result, string_types) and result:
+ self.module.warn('Docker warning: {0}'.format(result))
diff --git a/ansible_collections/community/docker/plugins/module_utils/common_api.py b/ansible_collections/community/docker/plugins/module_utils/common_api.py
new file mode 100644
index 00000000..7d46a153
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/common_api.py
@@ -0,0 +1,591 @@
+# Copyright 2016 Red Hat | Ansible
+# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import abc
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+try:
+ from requests.exceptions import RequestException, SSLError # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ # Define an exception class RequestException so that our code doesn't break.
+ class RequestException(Exception):
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils._api import auth
+from ansible_collections.community.docker.plugins.module_utils._api.api.client import APIClient as Client
+from ansible_collections.community.docker.plugins.module_utils._api.errors import (
+ APIError,
+ NotFound,
+ MissingRequirementException,
+ TLSParameterError,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.tls import TLSConfig
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
+ convert_filters,
+ parse_repository_tag,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import ( # noqa: F401, pylint: disable=unused-import
+ DEFAULT_DOCKER_HOST,
+ DEFAULT_TLS,
+ DEFAULT_TLS_VERIFY,
+ DEFAULT_TLS_HOSTNAME, # TODO: remove
+ DEFAULT_TIMEOUT_SECONDS,
+ DOCKER_COMMON_ARGS,
+ DOCKER_MUTUALLY_EXCLUSIVE,
+ DOCKER_REQUIRED_TOGETHER,
+ DEFAULT_DOCKER_REGISTRY, # TODO: remove
+ is_image_name_id, # TODO: remove
+ is_valid_tag, # TODO: remove
+ sanitize_result,
+ update_tls_hostname,
+)
+
+
+def _get_tls_config(fail_function, **kwargs):
+ '''
+ Build a TLSConfig from keyword arguments, converting TLSParameterError
+ into a module failure via fail_function instead of propagating it.
+ '''
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ fail_function("TLS config error: %s" % exc)
+
+
+def is_using_tls(auth_data):
+ # True when either TLS-with-verification or plain TLS is requested.
+ return auth_data['tls_verify'] or auth_data['tls']
+
+
+def get_connect_params(auth_data, fail_function):
+ '''
+ Translate resolved auth parameters into keyword arguments for the APIClient.
+
+ NOTE(review): mutates the caller's auth_data dict in place when TLS is in
+ use (rewrites 'docker_host' from tcp:// to https://).
+ '''
+ if is_using_tls(auth_data):
+ auth_data['docker_host'] = auth_data['docker_host'].replace('tcp://', 'https://')
+
+ result = dict(
+ base_url=auth_data['docker_host'],
+ version=auth_data['api_version'],
+ timeout=auth_data['timeout'],
+ )
+
+ if auth_data['tls_verify']:
+ # TLS with verification
+ tls_config = dict(
+ verify=True,
+ assert_hostname=auth_data['tls_hostname'],
+ ssl_version=auth_data['ssl_version'],
+ fail_function=fail_function,
+ )
+ if auth_data['cert_path'] and auth_data['key_path']:
+ tls_config['client_cert'] = (auth_data['cert_path'], auth_data['key_path'])
+ if auth_data['cacert_path']:
+ tls_config['ca_cert'] = auth_data['cacert_path']
+ result['tls'] = _get_tls_config(**tls_config)
+ elif auth_data['tls']:
+ # TLS without verification
+ tls_config = dict(
+ verify=False,
+ ssl_version=auth_data['ssl_version'],
+ fail_function=fail_function,
+ )
+ if auth_data['cert_path'] and auth_data['key_path']:
+ tls_config['client_cert'] = (auth_data['cert_path'], auth_data['key_path'])
+ result['tls'] = _get_tls_config(**tls_config)
+
+ if auth_data.get('use_ssh_client'):
+ result['use_ssh_client'] = True
+
+ # If neither TLS branch above matched, no 'tls' entry is set (plain connection).
+ return result
+
+
+class AnsibleDockerClientBase(Client):
+ '''
+ Docker client that talks to the daemon's HTTP API directly through the
+ vendored _api package (no Docker SDK for Python dependency). Subclasses
+ must implement fail() and _get_params().
+ '''
+ def __init__(self, min_docker_api_version=None):
+ self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
+
+ try:
+ super(AnsibleDockerClientBase, self).__init__(**self._connect_params)
+ self.docker_api_version_str = self.api_version
+ except MissingRequirementException as exc:
+ self.fail(missing_required_lib(exc.requirement), exception=exc.import_exception)
+ except APIError as exc:
+ self.fail("Docker API error: %s" % exc)
+ except Exception as exc:
+ self.fail("Error connecting: %s" % exc)
+
+ self.docker_api_version = LooseVersion(self.docker_api_version_str)
+ # API 1.25 is the collection-wide floor unless the module raises it.
+ min_docker_api_version = min_docker_api_version or '1.25'
+ if self.docker_api_version < LooseVersion(min_docker_api_version):
+ self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
+
+ def log(self, msg, pretty_print=False):
+ # Debug logging is intentionally a no-op; the commented-out body below
+ # is kept as a local debugging aid.
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+ @abc.abstractmethod
+ def fail(self, msg, **kwargs):
+ # Must terminate module execution; implementations do not return.
+ pass
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ # No-op by default; AnsibleDockerClient overrides this to forward to the module.
+ pass
+
+ @staticmethod
+ def _get_value(param_name, param_value, env_variable, default_value, type='str'):
+ '''
+ Resolve one configuration value with precedence:
+ module parameter > environment variable > default.
+ '''
+ if param_value is not None:
+ # take module parameter value
+ if type == 'bool':
+ if param_value in BOOLEANS_TRUE:
+ return True
+ if param_value in BOOLEANS_FALSE:
+ return False
+ return bool(param_value)
+ if type == 'int':
+ return int(param_value)
+ return param_value
+
+ if env_variable is not None:
+ env_value = os.environ.get(env_variable)
+ if env_value is not None:
+ # take the env variable value
+ # DOCKER_CERT_PATH names a directory; append the conventional file names.
+ if param_name == 'cert_path':
+ return os.path.join(env_value, 'cert.pem')
+ if param_name == 'cacert_path':
+ return os.path.join(env_value, 'ca.pem')
+ if param_name == 'key_path':
+ return os.path.join(env_value, 'key.pem')
+ if type == 'bool':
+ if env_value in BOOLEANS_TRUE:
+ return True
+ if env_value in BOOLEANS_FALSE:
+ return False
+ return bool(env_value)
+ if type == 'int':
+ return int(env_value)
+ return env_value
+
+ # take the default
+ return default_value
+
+ @abc.abstractmethod
+ def _get_params(self):
+ # Must return the mapping of module parameters (see AnsibleDockerClient).
+ pass
+
+ @property
+ def auth_params(self):
+ # Get authentication credentials.
+ # Precedence: module parameters-> environment variables-> defaults.
+
+ self.log('Getting credentials')
+
+ client_params = self._get_params()
+
+ params = dict()
+ for key in DOCKER_COMMON_ARGS:
+ params[key] = client_params.get(key)
+
+ result = dict(
+ docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+ DEFAULT_DOCKER_HOST, type='str'),
+ tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+ 'DOCKER_TLS_HOSTNAME', None, type='str'),
+ api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+ 'auto', type='str'),
+ cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None, type='str'),
+ cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None, type='str'),
+ key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None, type='str'),
+ ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None, type='str'),
+ tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS, type='bool'),
+ # NOTE(review): the param_name 'tls_verfy' is a typo, but harmless —
+ # _get_value only special-cases the *_path parameter names.
+ tls_verify=self._get_value('tls_verfy', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+ DEFAULT_TLS_VERIFY, type='bool'),
+ timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+ DEFAULT_TIMEOUT_SECONDS, type='int'),
+ use_ssh_client=self._get_value('use_ssh_client', params['use_ssh_client'], None, False, type='bool'),
+ )
+
+ def depr(*args, **kwargs):
+ self.deprecate(*args, **kwargs)
+
+ update_tls_hostname(result, old_behavior=True, deprecate_function=depr, uses_tls=is_using_tls(result))
+
+ return result
+
+ def _handle_ssl_error(self, error):
+ # Turn a hostname-mismatch SSL error into an actionable message; any
+ # other SSL error is reported verbatim. Both paths end in self.fail().
+ match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+ if match:
+ self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
+ "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+ "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+ "setting the `tls` parameter to true."
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
+ self.fail("SSL Exception: %s" % (error))
+
+ def get_container_by_id(self, container_id):
+ '''
+ Inspect a container by ID; returns None when it does not exist.
+ '''
+ try:
+ self.log("Inspecting container Id %s" % container_id)
+ result = self.get_json('/containers/{0}/json', container_id)
+ self.log("Completed container inspection")
+ return result
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting container: %s" % exc)
+
+ def get_container(self, name=None):
+ '''
+ Lookup a container and return the inspection results.
+ '''
+ if name is None:
+ return None
+
+ # The daemon reports container names with a leading '/'.
+ search_name = name
+ if not name.startswith('/'):
+ search_name = '/' + name
+
+ result = None
+ try:
+ params = {
+ 'limit': -1,
+ 'all': 1,
+ 'size': 0,
+ 'trunc_cmd': 0,
+ }
+ containers = self.get_json("/containers/json", params=params)
+ # Match by exact name first, then by ID prefix, then by full ID.
+ for container in containers:
+ self.log("testing container: %s" % (container['Names']))
+ if isinstance(container['Names'], list) and search_name in container['Names']:
+ result = container
+ break
+ if container['Id'].startswith(name):
+ result = container
+ break
+ if container['Id'] == name:
+ result = container
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving container list: %s" % exc)
+
+ if result is None:
+ return None
+
+ return self.get_container_by_id(result['Id'])
+
+ def get_network(self, name=None, network_id=None):
+ '''
+ Lookup a network and return the inspection results.
+ '''
+ if name is None and network_id is None:
+ return None
+
+ result = None
+
+ if network_id is None:
+ try:
+ networks = self.get_json("/networks")
+ # Match by exact name first, then by ID prefix.
+ for network in networks:
+ self.log("testing network: %s" % (network['Name']))
+ if name == network['Name']:
+ result = network
+ break
+ if network['Id'].startswith(name):
+ result = network
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving network list: %s" % exc)
+
+ if result is not None:
+ network_id = result['Id']
+
+ if network_id is not None:
+ try:
+ self.log("Inspecting network Id %s" % network_id)
+ result = self.get_json('/networks/{0}', network_id)
+ self.log("Completed network inspection")
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting network: %s" % exc)
+
+ return result
+
+ def _image_lookup(self, name, tag):
+ '''
+ Including a tag in the name parameter sent to the Docker SDK for Python images method
+ does not work consistently. Instead, get the result set for name and manually check
+ if the tag exists.
+ '''
+ try:
+ params = {
+ 'only_ids': 0,
+ 'all': 0,
+ }
+ if LooseVersion(self.api_version) < LooseVersion('1.25'):
+ # only use "filter" on API 1.24 and under, as it is deprecated
+ params['filter'] = name
+ else:
+ params['filters'] = convert_filters({'reference': name})
+ images = self.get_json("/images/json", params=params)
+ except Exception as exc:
+ self.fail("Error searching for image %s - %s" % (name, str(exc)))
+ if tag:
+ # Match either a plain tag ("name:tag") or a digest reference ("name@tag").
+ lookup = "%s:%s" % (name, tag)
+ lookup_digest = "%s@%s" % (name, tag)
+ response = images
+ images = []
+ for image in response:
+ tags = image.get('RepoTags')
+ digests = image.get('RepoDigests')
+ if (tags and lookup in tags) or (digests and lookup_digest in digests):
+ images = [image]
+ break
+ return images
+
+ def find_image(self, name, tag):
+ '''
+ Lookup an image (by name and tag) and return the inspection results.
+
+ Tries a chain of fallback lookups for docker.io images, since various
+ daemon versions report hub images under different names.
+ '''
+ if not name:
+ return None
+
+ self.log("Find image %s:%s" % (name, tag))
+ images = self._image_lookup(name, tag)
+ if not images:
+ # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub
+ registry, repo_name = auth.resolve_repository_name(name)
+ if registry == 'docker.io':
+ # If docker.io is explicitly there in name, the image
+ # isn't found in some cases (#41509)
+ self.log("Check for docker.io image: %s" % repo_name)
+ images = self._image_lookup(repo_name, tag)
+ if not images and repo_name.startswith('library/'):
+ # Sometimes library/xxx images are not found
+ lookup = repo_name[len('library/'):]
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images:
+ # Last case for some Docker versions: if docker.io wasn't there,
+ # it can be that the image wasn't found either
+ # (https://github.com/ansible/ansible/pull/15586)
+ lookup = "%s/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images and '/' not in repo_name:
+ # This seems to be happening with podman-docker
+ # (https://github.com/ansible-collections/community.docker/issues/291)
+ lookup = "%s/library/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+
+ if len(images) > 1:
+ self.fail("Registry returned more than one result for %s:%s" % (name, tag))
+
+ if len(images) == 1:
+ try:
+ return self.get_json('/images/{0}/json', images[0]['Id'])
+ except NotFound:
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
+
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+
+ def find_image_by_id(self, image_id, accept_missing_image=False):
+ '''
+ Lookup an image (by ID) and return the inspection results.
+
+ When accept_missing_image is True, a missing image returns None
+ instead of failing the module.
+ '''
+ if not image_id:
+ return None
+
+ self.log("Find image %s (by ID)" % image_id)
+ try:
+ return self.get_json('/images/{0}/json', image_id)
+ except NotFound as exc:
+ if not accept_missing_image:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ self.log("Image %s not found." % image_id)
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+
+ def pull_image(self, name, tag="latest", platform=None):
+ '''
+ Pull an image
+
+ Returns a tuple (new_image, unchanged): the post-pull inspection result
+ and whether the locally tagged image is the same as before the pull.
+ '''
+ self.log("Pulling image %s:%s" % (name, tag))
+ # Remember the pre-pull state so the caller can detect whether anything changed.
+ old_tag = self.find_image(name, tag)
+ try:
+ repository, image_tag = parse_repository_tag(name)
+ registry, repo_name = auth.resolve_repository_name(repository)
+ params = {
+ 'tag': tag or image_tag or 'latest',
+ 'fromImage': repository,
+ }
+ if platform is not None:
+ params['platform'] = platform
+
+ # Attach registry credentials for the resolved registry, if any.
+ headers = {}
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+
+ response = self._post(
+ self._url('/images/create'), params=params, headers=headers,
+ stream=True, timeout=None
+ )
+ self._raise_for_status(response)
+ # The daemon streams progress lines; errors are reported inline.
+ for line in self._stream_helper(response, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('error'):
+ if line.get('errorDetail'):
+ error_detail = line.get('errorDetail')
+ self.fail("Error pulling %s - code: %s message: %s" % (name,
+ error_detail.get('code'),
+ error_detail.get('message')))
+ else:
+ self.fail("Error pulling %s - %s" % (name, line.get('error')))
+ except Exception as exc:
+ self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
+
+ new_tag = self.find_image(name, tag)
+
+ return new_tag, old_tag == new_tag
+
+
+class AnsibleDockerClient(AnsibleDockerClientBase):
+ '''
+ API-based Docker client bound to an AnsibleModule: merges the common
+ Docker argument spec with the module's own, instantiates the module, and
+ routes failures, deprecations and warnings through it.
+ '''
+
+ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
+ required_together=None, required_if=None, required_one_of=None, required_by=None,
+ min_docker_api_version=None, option_minimal_versions=None,
+ option_minimal_versions_ignore_params=None, fail_results=None):
+
+ # Modules can put information in here which will always be returned
+ # in case client.fail() is called.
+ self.fail_results = fail_results or {}
+
+ # Combine the shared Docker connection options with module-specific ones.
+ merged_arg_spec = dict()
+ merged_arg_spec.update(DOCKER_COMMON_ARGS)
+ if argument_spec:
+ merged_arg_spec.update(argument_spec)
+ self.arg_spec = merged_arg_spec
+
+ mutually_exclusive_params = []
+ mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
+ if mutually_exclusive:
+ mutually_exclusive_params += mutually_exclusive
+
+ required_together_params = []
+ required_together_params += DOCKER_REQUIRED_TOGETHER
+ if required_together:
+ required_together_params += required_together
+
+ self.module = AnsibleModule(
+ argument_spec=merged_arg_spec,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive_params,
+ required_together=required_together_params,
+ required_if=required_if,
+ required_one_of=required_one_of,
+ required_by=required_by or {},
+ )
+
+ self.debug = self.module.params.get('debug')
+ self.check_mode = self.module.check_mode
+
+ # The base class connects to the daemon and enforces the minimum API
+ # version; it may call self.fail(), so self.module must exist first.
+ super(AnsibleDockerClient, self).__init__(min_docker_api_version=min_docker_api_version)
+
+ if option_minimal_versions is not None:
+ self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
+
+ def fail(self, msg, **kwargs):
+ # Merge extra result data into fail_results so it reaches the user on failure.
+ self.fail_results.update(kwargs)
+ self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ self.module.deprecate(msg, version=version, date=date, collection_name=collection_name)
+
+ def _get_params(self):
+ # Hook used by the base class to read connection parameters.
+ return self.module.params
+
+ def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
+ '''
+ Record, per module option, whether the daemon API supports it, and fail
+ if an unsupported option is actually used. (Unlike the SDK-based client,
+ only the API version matters here.)
+ '''
+ self.option_minimal_versions = dict()
+ for option in self.module.argument_spec:
+ if ignore_params is not None:
+ if option in ignore_params:
+ continue
+ self.option_minimal_versions[option] = dict()
+ self.option_minimal_versions.update(option_minimal_versions)
+
+ for option, data in self.option_minimal_versions.items():
+ # Test whether option is supported, and store result
+ support_docker_api = True
+ if 'docker_api_version' in data:
+ support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
+ data['supported'] = support_docker_api
+ # Fail if option is not supported but used
+ if not data['supported']:
+ # Test whether option is specified
+ if 'detect_usage' in data:
+ used = data['detect_usage'](self)
+ else:
+ used = self.module.params.get(option) is not None
+ if used and 'default' in self.module.argument_spec[option]:
+ # An option equal to its declared default does not count as "used".
+ used = self.module.params[option] != self.module.argument_spec[option]['default']
+ if used:
+ # If the option is used, compose error message.
+ if 'usage_msg' in data:
+ usg = data['usage_msg']
+ else:
+ usg = 'set %s option' % (option, )
+ if not support_docker_api:
+ msg = 'Docker API version is %s. Minimum version required is %s to %s.'
+ msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
+ else:
+ # should not happen
+ msg = 'Cannot %s with your configuration.' % (usg, )
+ self.fail(msg)
+
+ def report_warnings(self, result, warnings_key=None):
+ '''
+ Checks result of client operation for warnings, and if present, outputs them.
+
+ warnings_key should be a list of keys used to crawl the result dictionary.
+ For example, if warnings_key == ['a', 'b'], the function will consider
+ result['a']['b'] if these keys exist. If the result is a non-empty string, it
+ will be reported as a warning. If the result is a list, every entry will be
+ reported as a warning.
+
+ In most cases (if warnings are returned at all), warnings_key should be
+ ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
+ '''
+ if warnings_key is None:
+ warnings_key = ['Warnings']
+ for key in warnings_key:
+ if not isinstance(result, Mapping):
+ return
+ result = result.get(key)
+ if isinstance(result, Sequence):
+ for warning in result:
+ self.module.warn('Docker warning: {0}'.format(warning))
+ elif isinstance(result, string_types) and result:
+ self.module.warn('Docker warning: {0}'.format(result))
diff --git a/ansible_collections/community/docker/plugins/module_utils/copy.py b/ansible_collections/community/docker/plugins/module_utils/copy.py
new file mode 100644
index 00000000..6df84598
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/copy.py
@@ -0,0 +1,442 @@
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import datetime
+import io
+import json
+import os
+import os.path
+import shutil
+import stat
+import tarfile
+
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.six import raise_from
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, NotFound
+
+
+class DockerFileCopyError(Exception):
+    """Base class for errors while copying files to/from Docker containers."""
+    pass
+
+
+class DockerUnexpectedError(DockerFileCopyError):
+    """Raised when an operation fails in a way that should not normally happen."""
+    pass
+
+
+class DockerFileNotFound(DockerFileCopyError):
+    """Raised when a file that should be copied or stat-ed does not exist."""
+    pass
+
+
+def _put_archive(client, container, path, data):
+    """Upload a tar archive (bytes, file object, or generator) to ``path`` in the container.
+
+    Returns ``True`` when the daemon answered with HTTP 200.
+    """
+    # data can also be file object for streaming. This is because _put uses requests's put().
+    # See https://requests.readthedocs.io/en/latest/user/advanced/#streaming-uploads
+    url = client._url('/containers/{0}/archive', container)
+    res = client._put(url, params={'path': path}, data=data)
+    client._raise_for_status(res)
+    return res.status_code == 200
+
+
+def _symlink_tar_creator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
+    """Return the bytes of a tar archive containing the single symlink at ``b_in_path``.
+
+    ``file_stat`` must be the lstat() result for the symlink; ownership and
+    (optionally) mode of the archived entry are overridden by the arguments.
+    """
+    if not stat.S_ISLNK(file_stat.st_mode):
+        raise DockerUnexpectedError('stat information is not for a symlink')
+    bio = io.BytesIO()
+    with tarfile.open(fileobj=bio, mode='w|', dereference=False, encoding='utf-8') as tar:
+        # Note that without both name (bytes) and arcname (unicode), this either fails for
+        # Python 2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this
+        # form) it works with Python 2.7, 3.5, 3.6, and 3.7 up to 3.11
+        tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
+        tarinfo.uid = user_id
+        tarinfo.uname = ''
+        if user_name:
+            tarinfo.uname = user_name
+        tarinfo.gid = group_id
+        tarinfo.gname = ''
+        # Keep only the owner permission bits, unless an explicit mode overrides them.
+        tarinfo.mode &= 0o700
+        if mode is not None:
+            tarinfo.mode = mode
+        if not tarinfo.issym():
+            raise DockerUnexpectedError('stat information is not for a symlink')
+        tar.addfile(tarinfo)
+    return bio.getvalue()
+
+
+def _symlink_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
+    # Generator wrapper so symlinks stream through the same interface as regular files.
+    yield _symlink_tar_creator(b_in_path, file_stat, out_file, user_id, group_id, mode, user_name)
+
+
+def _regular_file_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
+    """Yield the chunks of a tar stream containing the single regular file ``b_in_path``.
+
+    The tar structure (header, 512-byte block padding, two zero end blocks,
+    record padding) is produced by hand so the file content can be streamed
+    without buffering it in memory.
+    """
+    if not stat.S_ISREG(file_stat.st_mode):
+        raise DockerUnexpectedError('stat information is not for a regular file')
+    tarinfo = tarfile.TarInfo()
+    # Normalize to a relative, slash-separated in-archive name (drop drive and leading '/').
+    tarinfo.name = os.path.splitdrive(to_text(out_file))[1].replace(os.sep, '/').lstrip('/')
+    tarinfo.mode = (file_stat.st_mode & 0o700) if mode is None else mode
+    tarinfo.uid = user_id
+    tarinfo.gid = group_id
+    tarinfo.size = file_stat.st_size
+    tarinfo.mtime = file_stat.st_mtime
+    tarinfo.type = tarfile.REGTYPE
+    tarinfo.linkname = ''
+    if user_name:
+        tarinfo.uname = user_name
+
+    tarinfo_buf = tarinfo.tobuf()
+    total_size = len(tarinfo_buf)
+    yield tarinfo_buf
+
+    # Stream the file contents in 64 KiB chunks.
+    size = tarinfo.size
+    total_size += size
+    with open(b_in_path, 'rb') as f:
+        while size > 0:
+            to_read = min(size, 65536)
+            buf = f.read(to_read)
+            if not buf:
+                break
+            size -= len(buf)
+            yield buf
+        if size:
+            # If for some reason the file shrunk, fill up to the announced size with zeros.
+            # (If it enlarged, ignore the remainder.)
+            yield tarfile.NUL * size
+
+    remainder = tarinfo.size % tarfile.BLOCKSIZE
+    if remainder:
+        # We need to write a multiple of 512 bytes. Fill up with zeros.
+        yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
+        total_size += tarfile.BLOCKSIZE - remainder
+
+    # End with two zeroed blocks
+    yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
+    total_size += 2 * tarfile.BLOCKSIZE
+
+    # Pad the archive to a multiple of RECORDSIZE, as tar readers expect.
+    remainder = total_size % tarfile.RECORDSIZE
+    if remainder > 0:
+        yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
+
+
+def _regular_content_tar_generator(content, out_file, user_id, group_id, mode, user_name=None):
+    """Yield the chunks of a tar stream containing one regular file with the given ``content`` bytes."""
+    tarinfo = tarfile.TarInfo()
+    # Normalize to a relative, slash-separated in-archive name (drop drive and leading '/').
+    tarinfo.name = os.path.splitdrive(to_text(out_file))[1].replace(os.sep, '/').lstrip('/')
+    tarinfo.mode = mode
+    tarinfo.uid = user_id
+    tarinfo.gid = group_id
+    tarinfo.size = len(content)
+    try:
+        tarinfo.mtime = int(datetime.datetime.now().timestamp())
+    except AttributeError:
+        # Python 2 (or more precisely: Python < 3.3) has no timestamp(). Use the following
+        # expression for Python 2:
+        tarinfo.mtime = int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds())
+    tarinfo.type = tarfile.REGTYPE
+    tarinfo.linkname = ''
+    if user_name:
+        tarinfo.uname = user_name
+
+    tarinfo_buf = tarinfo.tobuf()
+    total_size = len(tarinfo_buf)
+    yield tarinfo_buf
+
+    total_size += len(content)
+    yield content
+
+    remainder = tarinfo.size % tarfile.BLOCKSIZE
+    if remainder:
+        # We need to write a multiple of 512 bytes. Fill up with zeros.
+        yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
+        total_size += tarfile.BLOCKSIZE - remainder
+
+    # End with two zeroed blocks
+    yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
+    total_size += 2 * tarfile.BLOCKSIZE
+
+    # Pad the archive to a multiple of RECORDSIZE, as tar readers expect.
+    remainder = total_size % tarfile.RECORDSIZE
+    if remainder > 0:
+        yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
+
+
+def put_file(client, container, in_path, out_path, user_id, group_id, mode=None, user_name=None, follow_links=False):
+    """Transfer a file from local to Docker container.
+
+    Only regular files and symlinks are supported. Ownership (and optionally
+    ``mode``) of the created file are set from the given arguments.
+
+    :raises DockerFileNotFound: if ``in_path`` does not exist locally.
+    :raises DockerUnexpectedError: if the daemon rejects the upload.
+    """
+    if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+        raise DockerFileNotFound(
+            "file or module does not exist: %s" % to_native(in_path))
+
+    b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
+
+    out_dir, out_file = os.path.split(out_path)
+
+    # stat() follows symlinks, lstat() describes the link itself.
+    if follow_links:
+        file_stat = os.stat(b_in_path)
+    else:
+        file_stat = os.lstat(b_in_path)
+
+    # Select the streaming tar generator matching the file type.
+    if stat.S_ISREG(file_stat.st_mode):
+        stream = _regular_file_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=mode, user_name=user_name)
+    elif stat.S_ISLNK(file_stat.st_mode):
+        stream = _symlink_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=mode, user_name=user_name)
+    else:
+        raise DockerFileCopyError(
+            'File{0} {1} is neither a regular file nor a symlink (stat mode {2}).'.format(
+                ' referenced by' if follow_links else '', in_path, oct(file_stat.st_mode)))
+
+    ok = _put_archive(client, container, out_dir, stream)
+    if not ok:
+        raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container))
+
+
+def put_file_content(client, container, content, out_path, user_id, group_id, mode, user_name=None):
+    """Create a file at ``out_path`` in the Docker container with the given ``content`` bytes.
+
+    :raises DockerUnexpectedError: if the daemon rejects the upload.
+    """
+    out_dir, out_file = os.path.split(out_path)
+
+    stream = _regular_content_tar_generator(content, out_file, user_id, group_id, mode, user_name=user_name)
+
+    ok = _put_archive(client, container, out_dir, stream)
+    if not ok:
+        raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container))
+
+
+def stat_file(client, container, in_path, follow_links=False, log=None):
+    """Fetch information on a file from a Docker container to local.
+
+    Return a tuple ``(path, stat_data, link_target)`` where:
+
+    :path: is the resolved path in case ``follow_links=True``;
+    :stat_data: is ``None`` if the file does not exist, or a dictionary with fields
+        ``name`` (string), ``size`` (integer), ``mode`` (integer, see https://pkg.go.dev/io/fs#FileMode),
+        ``mtime`` (string), and ``linkTarget`` (string);
+    :link_target: is ``None`` if the file is not a symlink or when ``follow_links=False``,
+        and a string with the symlink target otherwise.
+    """
+    # Remember every path already stat-ed so symlink cycles are detected.
+    considered_in_paths = set()
+
+    while True:
+        if in_path in considered_in_paths:
+            raise DockerFileCopyError('Found infinite symbolic link loop when trying to stat "{0}"'.format(in_path))
+        considered_in_paths.add(in_path)
+
+        if log:
+            log('FETCH: Stating "%s"' % in_path)
+
+        # A HEAD on the archive endpoint returns the stat data base64-encoded
+        # in a response header, without transferring any file content.
+        response = client._head(
+            client._url('/containers/{0}/archive', container),
+            params={'path': in_path},
+        )
+        if response.status_code == 404:
+            return in_path, None, None
+        client._raise_for_status(response)
+        header = response.headers.get('x-docker-container-path-stat')
+        try:
+            stat_data = json.loads(base64.b64decode(header))
+        except Exception as exc:
+            raise DockerUnexpectedError(
+                'When retrieving information for {in_path} from {container}, obtained header {header!r} that cannot be loaded as JSON: {exc}'
+                .format(in_path=in_path, container=container, header=header, exc=exc)
+            )
+
+        # https://pkg.go.dev/io/fs#FileMode: bit 32 - 5 means ModeSymlink
+        if stat_data['mode'] & (1 << (32 - 5)) != 0:
+            link_target = stat_data['linkTarget']
+            if not follow_links:
+                return in_path, stat_data, link_target
+            # Resolve the link relative to its containing directory and retry.
+            in_path = os.path.join(os.path.split(in_path)[0], link_target)
+            continue
+
+        return in_path, stat_data, None
+
+
+class _RawGeneratorFileobj(io.RawIOBase):
+    """Read-only raw stream that pulls its data lazily from a bytes generator."""
+
+    def __init__(self, stream):
+        self._stream = stream
+        # Bytes from the last generator chunk that have not been consumed yet.
+        self._buf = b''
+
+    def readable(self):
+        return True
+
+    def _readinto_from_buf(self, b, index, length):
+        # Copy as much buffered data as fits into b[index:length]; return the new index.
+        cpy = min(length - index, len(self._buf))
+        if cpy:
+            b[index:index + cpy] = self._buf[:cpy]
+            self._buf = self._buf[cpy:]
+            index += cpy
+        return index
+
+    def readinto(self, b):
+        index = 0
+        length = len(b)
+
+        # Serve from the internal buffer first.
+        index = self._readinto_from_buf(b, index, length)
+        if index == length:
+            return index
+
+        # Pull at most one more chunk; a short read is valid for RawIOBase.
+        try:
+            self._buf += next(self._stream)
+        except StopIteration:
+            return index
+
+        return self._readinto_from_buf(b, index, length)
+
+
+def _stream_generator_to_fileobj(stream):
+    '''Given a generator that generates chunks of bytes, create a readable buffered stream.'''
+    # BufferedReader turns the short reads of the raw adapter into full reads.
+    raw = _RawGeneratorFileobj(stream)
+    return io.BufferedReader(raw)
+
+
+def fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=False, log=None):
+    """Fetch a file (as a tar file entry) from a Docker container to local.
+
+    The matching ``process_*`` callback is invoked for the member kind found
+    (missing file, regular file, symlink, anything else); its return value is
+    returned from this function. With ``follow_links=True``, symlinks are
+    resolved by re-fetching the link target instead of calling ``process_symlink``.
+    """
+    # Remember every path already fetched so symlink cycles are detected.
+    considered_in_paths = set()
+
+    while True:
+        if in_path in considered_in_paths:
+            raise DockerFileCopyError('Found infinite symbolic link loop when trying to fetch "{0}"'.format(in_path))
+        considered_in_paths.add(in_path)
+
+        if log:
+            log('FETCH: Fetching "%s"' % in_path)
+        try:
+            stream = client.get_raw_stream(
+                '/containers/{0}/archive', container,
+                params={'path': in_path},
+                headers={'Accept-Encoding': 'identity'},
+            )
+        except NotFound:
+            return process_none(in_path)
+
+        # The endpoint returns a tar archive; for a single path it must
+        # contain exactly one member.
+        with tarfile.open(fileobj=_stream_generator_to_fileobj(stream), mode='r|') as tar:
+            symlink_member = None
+            result = None
+            found = False
+            for member in tar:
+                if found:
+                    raise DockerUnexpectedError('Received tarfile contains more than one file!')
+                found = True
+                if member.issym():
+                    # Defer symlink handling until the archive is fully read.
+                    symlink_member = member
+                    continue
+                if member.isfile():
+                    result = process_regular(in_path, tar, member)
+                    continue
+                result = process_other(in_path, member)
+            if symlink_member:
+                if not follow_links:
+                    return process_symlink(in_path, symlink_member)
+                # Resolve the link relative to its containing directory and retry.
+                in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname)
+                if log:
+                    log('FETCH: Following symbolic link to "%s"' % in_path)
+                continue
+            if found:
+                return result
+            raise DockerUnexpectedError('Received tarfile is empty!')
+
+
+def fetch_file(client, container, in_path, out_path, follow_links=False, log=None):
+    """Copy ``in_path`` from the container to local ``out_path`` via :func:`fetch_file_ex`.
+
+    Regular files are written to ``out_path``; symlinks are recreated locally
+    (unless ``follow_links=True``); anything else raises. Returns the
+    (possibly resolved) container-side path.
+    """
+    b_out_path = to_bytes(out_path, errors='surrogate_or_strict')
+
+    def process_none(in_path):
+        raise DockerFileNotFound(
+            'File {in_path} does not exist in container {container}'
+            .format(in_path=in_path, container=container)
+        )
+
+    def process_regular(in_path, tar, member):
+        # Remove a pre-existing file/symlink so we do not write through a symlink.
+        if not follow_links and os.path.exists(b_out_path):
+            os.unlink(b_out_path)
+
+        # NOTE(review): in_f is never closed explicitly; acceptable for an
+        # in-memory tar stream, but worth confirming.
+        in_f = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
+        with open(b_out_path, 'wb') as out_f:
+            shutil.copyfileobj(in_f, out_f)
+        return in_path
+
+    def process_symlink(in_path, member):
+        if os.path.exists(b_out_path):
+            os.unlink(b_out_path)
+
+        os.symlink(member.linkname, b_out_path)
+        return in_path
+
+    def process_other(in_path, member):
+        raise DockerFileCopyError('Remote file "%s" is not a regular file or a symbolic link' % in_path)
+
+    return fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=follow_links, log=log)
+
+
+def _execute_command(client, container, command, log=None, check_rc=False):
+    """Run ``command`` (an argv list) in ``container`` via the exec API.
+
+    Returns ``(rc, stdout, stderr)``; with ``check_rc=True`` a non-zero exit
+    code raises DockerUnexpectedError.
+    """
+    if log:
+        log('Executing {command} in {container}'.format(command=command, container=container))
+
+    # Parameters for creating the exec instance.
+    data = {
+        'Container': container,
+        'User': '',
+        'Privileged': False,
+        'Tty': False,
+        'AttachStdin': False,
+        'AttachStdout': True,
+        'AttachStderr': True,
+        'Cmd': command,
+    }
+
+    if 'detachKeys' in client._general_configs:
+        data['detachKeys'] = client._general_configs['detachKeys']
+
+    try:
+        exec_data = client.post_json_to_json('/containers/{0}/exec', container, data=data)
+    except NotFound as e:
+        raise_from(
+            DockerFileCopyError('Could not find container "{container}"'.format(container=container)),
+            e,
+        )
+    except APIError as e:
+        # 409 Conflict is what the daemon returns for a paused container.
+        if e.response is not None and e.response.status_code == 409:
+            raise_from(
+                DockerFileCopyError('Cannot execute command in paused container "{container}"'.format(container=container)),
+                e,
+            )
+        raise
+    exec_id = exec_data['Id']
+
+    # NOTE(review): this second ``data`` dict is built but never passed to
+    # post_json_to_stream below -- confirm whether it should be sent as the
+    # exec-start request body.
+    data = {
+        'Tty': False,
+        'Detach': False
+    }
+    stdout, stderr = client.post_json_to_stream('/exec/{0}/start', exec_id, stream=False, demux=True, tty=False)
+
+    result = client.get_json('/exec/{0}/json', exec_id)
+
+    rc = result.get('ExitCode') or 0
+    stdout = stdout or b''
+    stderr = stderr or b''
+
+    if log:
+        log('Exit code {rc}, stdout {stdout!r}, stderr {stderr!r}'.format(rc=rc, stdout=stdout, stderr=stderr))
+
+    if check_rc and rc != 0:
+        raise DockerUnexpectedError(
+            'Obtained unexpected exit code {rc} when running "{command}" in {container}.\nSTDOUT: {stdout}\nSTDERR: {stderr}'
+            .format(command=' '.join(command), container=container, rc=rc, stdout=stdout, stderr=stderr)
+        )
+
+    return rc, stdout, stderr
+
+
+def determine_user_group(client, container, log=None):
+    """Return ``(uid, gid)`` of the container's default user, via ``id -u && id -g``."""
+    dummy, stdout, stderr = _execute_command(client, container, ['/bin/sh', '-c', 'id -u && id -g'], check_rc=True, log=log)
+
+    # Expect exactly two lines: uid then gid.
+    stdout_lines = stdout.splitlines()
+    if len(stdout_lines) != 2:
+        raise DockerUnexpectedError(
+            'Expected two-line output to obtain user and group ID for container {container}, but got {lc} lines:\n{stdout}'
+            .format(container=container, lc=len(stdout_lines), stdout=stdout)
+        )
+
+    user_id, group_id = stdout_lines
+    try:
+        return int(user_id), int(group_id)
+    except ValueError:
+        raise DockerUnexpectedError(
+            'Expected two-line output with numeric IDs to obtain user and group ID for container {container}, but got "{l1}" and "{l2}" instead'
+            .format(container=container, l1=user_id, l2=group_id)
+        )
diff --git a/ansible_collections/community/docker/plugins/module_utils/image_archive.py b/ansible_collections/community/docker/plugins/module_utils/image_archive.py
new file mode 100644
index 00000000..e174631e
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/image_archive.py
@@ -0,0 +1,157 @@
+# Copyright 2022 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import tarfile
+
+from ansible.module_utils.common.text.converters import to_native
+
+
+class ImageArchiveManifestSummary(object):
+    '''
+    Represents data extracted from a manifest.json found in the tar archive output of the
+    "docker image save some:tag > some.tar" command.
+    '''
+
+    def __init__(self, image_id, repo_tags):
+        '''
+        :param image_id: File name portion of Config entry, e.g. abcde12345 from abcde12345.json
+        :type image_id: str
+        :param repo_tags: Docker image names, e.g. ["hello-world:latest"]
+        :type repo_tags: list
+        '''
+
+        self.image_id = image_id
+        self.repo_tags = repo_tags
+
+
+class ImageArchiveInvalidException(Exception):
+    '''Raised when an image archive exists but its manifest cannot be read or parsed.'''
+
+    def __init__(self, message, cause):
+        '''
+        :param message: Exception message
+        :type message: str
+        :param cause: Inner exception that this exception wraps
+        :type cause: Exception | None
+        '''
+
+        super(ImageArchiveInvalidException, self).__init__(message)
+
+        # Python 2 doesn't support causes
+        self.cause = cause
+
+
+def api_image_id(archive_image_id):
+    '''
+    Accepts an image hash in the format stored in manifest.json, and returns an equivalent identifier
+    that represents the same image hash, but in the format presented by the Docker Engine API.
+
+    :param archive_image_id: plain image hash
+    :type archive_image_id: str
+
+    :returns: Prefixed hash used by REST api
+    :rtype: str
+    '''
+
+    # e.g. 'abc123' -> 'sha256:abc123'
+    return 'sha256:%s' % archive_image_id
+
+
+def archived_image_manifest(archive_path):
+    '''
+    Attempts to get Image.Id and image name from metadata stored in the image
+    archive tar file.
+
+    The tar should contain a file "manifest.json" with an array with a single entry,
+    and the entry should have a Config field with the image ID in its file name, as
+    well as a RepoTags list, which typically has only one entry.
+
+    :raises:
+        ImageArchiveInvalidException: A file exists at archive_path, but an image ID could not be extracted from it.
+
+    :param archive_path: Tar file to read
+    :type archive_path: str
+
+    :return: None, if no file at archive_path, or the extracted image ID, which will not have a sha256: prefix.
+    :rtype: ImageArchiveManifestSummary
+    '''
+
+    try:
+        # FileNotFoundError does not exist in Python 2
+        if not os.path.isfile(archive_path):
+            return None
+
+        # try/finally instead of 'with': see the Python 2.6 notes below.
+        tf = tarfile.open(archive_path, 'r')
+        try:
+            try:
+                ef = tf.extractfile('manifest.json')
+                try:
+                    text = ef.read().decode('utf-8')
+                    manifest = json.loads(text)
+                except Exception as exc:
+                    raise ImageArchiveInvalidException(
+                        "Failed to decode and deserialize manifest.json: %s" % to_native(exc),
+                        exc
+                    )
+                finally:
+                    # In Python 2.6, this does not have __exit__
+                    ef.close()
+
+                if len(manifest) != 1:
+                    raise ImageArchiveInvalidException(
+                        "Expected to have one entry in manifest.json but found %s" % len(manifest),
+                        None
+                    )
+
+                m0 = manifest[0]
+
+                try:
+                    config_file = m0['Config']
+                except KeyError as exc:
+                    raise ImageArchiveInvalidException(
+                        "Failed to get Config entry from manifest.json: %s" % to_native(exc),
+                        exc
+                    )
+
+                # Extracts hash without 'sha256:' prefix
+                try:
+                    # Strip off .json filename extension, leaving just the hash.
+                    image_id = os.path.splitext(config_file)[0]
+                except Exception as exc:
+                    raise ImageArchiveInvalidException(
+                        "Failed to extract image id from config file name %s: %s" % (config_file, to_native(exc)),
+                        exc
+                    )
+
+                try:
+                    repo_tags = m0['RepoTags']
+                except KeyError as exc:
+                    raise ImageArchiveInvalidException(
+                        "Failed to get RepoTags entry from manifest.json: %s" % to_native(exc),
+                        exc
+                    )
+
+                return ImageArchiveManifestSummary(
+                    image_id=image_id,
+                    repo_tags=repo_tags
+                )
+
+            except ImageArchiveInvalidException:
+                # Re-raise our own errors unchanged instead of re-wrapping them.
+                raise
+            except Exception as exc:
+                raise ImageArchiveInvalidException(
+                    "Failed to extract manifest.json from tar file %s: %s" % (archive_path, to_native(exc)),
+                    exc
+                )
+
+        finally:
+            # In Python 2.6, TarFile does not have __exit__
+            tf.close()
+
+    except ImageArchiveInvalidException:
+        raise
+    except Exception as exc:
+        raise ImageArchiveInvalidException("Failed to open tar file %s: %s" % (archive_path, to_native(exc)), exc)
diff --git a/ansible_collections/community/docker/plugins/module_utils/module_container/base.py b/ansible_collections/community/docker/plugins/module_utils/module_container/base.py
new file mode 100644
index 00000000..21c29226
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/module_container/base.py
@@ -0,0 +1,1204 @@
+# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import abc
+import os
+import re
+import shlex
+
+from functools import partial
+
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.six import string_types
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ clean_dict_booleans_for_docker_api,
+ normalize_healthcheck,
+ omit_none_from_dict,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
+ parse_env_file,
+)
+
+
+# Unique sentinel string; presumably substituted for the daemon's default IP
+# when normalizing/comparing configuration -- TODO confirm against usages.
+_DEFAULT_IP_REPLACEMENT_STRING = '[[DEFAULT_IP:iewahhaeB4Sae6Aen8IeShairoh4zeph7xaekoh8Geingunaesaeweiy3ooleiwi]]'
+
+
+# Maps a mount option name to the mount type ('volume', 'bind', 'tmpfs') for
+# which that option is valid.
+_MOUNT_OPTION_TYPES = dict(
+    volume_driver='volume',
+    volume_options='volume',
+    propagation='bind',
+    no_copy='volume',
+    labels='volume',
+    tmpfs_size='tmpfs',
+    tmpfs_mode='tmpfs',
+)
+
+
+def _get_ansible_type(type):
+    """Map an internal option type to the Ansible argument-spec type ('set' becomes 'list')."""
+    if type == 'set':
+        return 'list'
+    if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'):
+        raise Exception('Invalid type "%s"' % (type, ))
+    return type
+
+
+class Option(object):
+    """Describes one container option: its internal type, its Ansible argument-spec
+    representation, and how its current/desired values are compared."""
+
+    def __init__(
+        self,
+        name,
+        type,
+        owner,
+        ansible_type=None,
+        elements=None,
+        ansible_elements=None,
+        ansible_suboptions=None,
+        ansible_aliases=None,
+        ansible_choices=None,
+        needs_no_suboptions=False,
+        default_comparison=None,
+        not_a_container_option=False,
+        not_an_ansible_option=False,
+        copy_comparison_from=None,
+    ):
+        self.name = name
+        self.type = type
+        self.ansible_type = ansible_type or _get_ansible_type(type)
+        # Validate that elements/suboptions are supplied exactly when the
+        # (Ansible) type requires them.
+        needs_elements = self.type in ('list', 'set')
+        needs_ansible_elements = self.ansible_type in ('list', )
+        if elements is not None and not needs_elements:
+            raise Exception('elements only allowed for lists/sets')
+        if elements is None and needs_elements:
+            raise Exception('elements required for lists/sets')
+        if ansible_elements is not None and not needs_ansible_elements:
+            raise Exception('Ansible elements only allowed for Ansible lists')
+        if (elements is None and ansible_elements is None) and needs_ansible_elements:
+            raise Exception('Ansible elements required for Ansible lists')
+        self.elements = elements if needs_elements else None
+        self.ansible_elements = (ansible_elements or _get_ansible_type(elements)) if needs_ansible_elements else None
+        needs_suboptions = (self.ansible_type == 'list' and self.ansible_elements == 'dict') or (self.ansible_type == 'dict')
+        if ansible_suboptions is not None and not needs_suboptions:
+            raise Exception('suboptions only allowed for Ansible lists with dicts, or Ansible dicts')
+        if ansible_suboptions is None and needs_suboptions and not needs_no_suboptions and not not_an_ansible_option:
+            raise Exception('suboptions required for Ansible lists with dicts, or Ansible dicts')
+        self.ansible_suboptions = ansible_suboptions if needs_suboptions else None
+        self.ansible_aliases = ansible_aliases or []
+        self.ansible_choices = ansible_choices
+        # Derive the comparison category: sets of dicts, sets, lists, dicts are
+        # compared structurally; everything else as a plain value.
+        comparison_type = self.type
+        if comparison_type == 'set' and self.elements == 'dict':
+            comparison_type = 'set(dict)'
+        elif comparison_type not in ('set', 'list', 'dict'):
+            comparison_type = 'value'
+        self.comparison_type = comparison_type
+        # Default comparison: strict for lists/values, allow_more_present for
+        # sets and dicts, unless explicitly overridden.
+        if default_comparison is not None:
+            self.comparison = default_comparison
+        elif comparison_type in ('list', 'value'):
+            self.comparison = 'strict'
+        else:
+            self.comparison = 'allow_more_present'
+        self.not_a_container_option = not_a_container_option
+        self.not_an_ansible_option = not_an_ansible_option
+        self.copy_comparison_from = copy_comparison_from
+
+
+class OptionGroup(object):
+    """A group of related options sharing a preprocessing step, Ansible argument-spec
+    constraints, and per-engine handling."""
+
+    def __init__(
+        self,
+        preprocess=None,
+        ansible_mutually_exclusive=None,
+        ansible_required_together=None,
+        ansible_required_one_of=None,
+        ansible_required_if=None,
+        ansible_required_by=None,
+    ):
+        # Default preprocessing is the identity function.
+        if preprocess is None:
+            def preprocess(module, values):
+                return values
+        self.preprocess = preprocess
+        self.options = []
+        self.all_options = []
+        self.engines = {}
+        self.ansible_mutually_exclusive = ansible_mutually_exclusive or []
+        self.ansible_required_together = ansible_required_together or []
+        self.ansible_required_one_of = ansible_required_one_of or []
+        self.ansible_required_if = ansible_required_if or []
+        self.ansible_required_by = ansible_required_by or {}
+        self.argument_spec = {}
+
+    def add_option(self, *args, **kwargs):
+        # Builder-style: registers an Option and mirrors it into argument_spec
+        # unless it is marked as not an Ansible option. Returns self for chaining.
+        option = Option(*args, owner=self, **kwargs)
+        if not option.not_a_container_option:
+            self.options.append(option)
+        self.all_options.append(option)
+        if not option.not_an_ansible_option:
+            ansible_option = {
+                'type': option.ansible_type,
+            }
+            if option.ansible_elements is not None:
+                ansible_option['elements'] = option.ansible_elements
+            if option.ansible_suboptions is not None:
+                ansible_option['options'] = option.ansible_suboptions
+            if option.ansible_aliases:
+                ansible_option['aliases'] = option.ansible_aliases
+            if option.ansible_choices is not None:
+                ansible_option['choices'] = option.ansible_choices
+            self.argument_spec[option.name] = ansible_option
+        return self
+
+    def supports_engine(self, engine_name):
+        return engine_name in self.engines
+
+    def get_engine(self, engine_name):
+        return self.engines[engine_name]
+
+    def add_engine(self, engine_name, engine):
+        # Returns self for chaining.
+        self.engines[engine_name] = engine
+        return self
+
+
+class Engine(object):
+    """Abstract per-option-group engine: reads, compares, and writes option values
+    for one client implementation.
+
+    NOTE(review): the methods are decorated with @abc.abstractmethod, but the
+    class does not use ABCMeta as metaclass, so instantiation is not actually
+    prevented -- the decorators are documentation only here.
+    """
+
+    min_api_version = None  # string or None
+    min_api_version_obj = None  # LooseVersion object or None
+
+    @abc.abstractmethod
+    def get_value(self, module, container, api_version, options):
+        pass
+
+    @abc.abstractmethod
+    def set_value(self, module, data, api_version, options, values):
+        pass
+
+    @abc.abstractmethod
+    def get_expected_values(self, module, client, api_version, options, image, values):
+        pass
+
+    @abc.abstractmethod
+    def ignore_mismatching_result(self, module, client, api_version, option, image, container_value, expected_value):
+        pass
+
+    @abc.abstractmethod
+    def preprocess_value(self, module, client, api_version, options, values):
+        pass
+
+    @abc.abstractmethod
+    def update_value(self, module, data, api_version, options, values):
+        pass
+
+    @abc.abstractmethod
+    def can_set_value(self, api_version):
+        pass
+
+    @abc.abstractmethod
+    def can_update_value(self, api_version):
+        pass
+
+
+class EngineDriver(object):
+    """Abstract driver interface encapsulating all container lifecycle and inspection
+    operations for one backend (e.g. Docker API).
+
+    NOTE(review): as with Engine, @abc.abstractmethod is used without ABCMeta,
+    so these abstract methods are not enforced at instantiation time.
+    """
+
+    name = None  # string
+
+    @abc.abstractmethod
+    def setup(self, argument_spec, mutually_exclusive=None, required_together=None, required_one_of=None, required_if=None, required_by=None):
+        # Return (module, active_options, client)
+        pass
+
+    @abc.abstractmethod
+    def get_api_version(self, client):
+        pass
+
+    @abc.abstractmethod
+    def get_container_id(self, container):
+        pass
+
+    @abc.abstractmethod
+    def get_image_from_container(self, container):
+        pass
+
+    @abc.abstractmethod
+    def get_image_name_from_container(self, container):
+        pass
+
+    @abc.abstractmethod
+    def is_container_removing(self, container):
+        pass
+
+    @abc.abstractmethod
+    def is_container_running(self, container):
+        pass
+
+    @abc.abstractmethod
+    def is_container_paused(self, container):
+        pass
+
+    @abc.abstractmethod
+    def inspect_container_by_name(self, client, container_name):
+        pass
+
+    @abc.abstractmethod
+    def inspect_container_by_id(self, client, container_id):
+        pass
+
+    @abc.abstractmethod
+    def inspect_image_by_id(self, client, image_id):
+        pass
+
+    @abc.abstractmethod
+    def inspect_image_by_name(self, client, repository, tag):
+        pass
+
+    @abc.abstractmethod
+    def pull_image(self, client, repository, tag, platform=None):
+        pass
+
+    @abc.abstractmethod
+    def pause_container(self, client, container_id):
+        pass
+
+    @abc.abstractmethod
+    def unpause_container(self, client, container_id):
+        pass
+
+    @abc.abstractmethod
+    def disconnect_container_from_network(self, client, container_id, network_id):
+        pass
+
+    @abc.abstractmethod
+    def connect_container_to_network(self, client, container_id, network_id, parameters=None):
+        pass
+
+    @abc.abstractmethod
+    def create_container(self, client, container_name, create_parameters):
+        pass
+
+    @abc.abstractmethod
+    def start_container(self, client, container_id):
+        pass
+
+    @abc.abstractmethod
+    def wait_for_container(self, client, container_id, timeout=None):
+        pass
+
+    @abc.abstractmethod
+    def get_container_output(self, client, container_id):
+        pass
+
+    @abc.abstractmethod
+    def update_container(self, client, container_id, update_parameters):
+        pass
+
+    @abc.abstractmethod
+    def restart_container(self, client, container_id, timeout=None):
+        pass
+
+    @abc.abstractmethod
+    def kill_container(self, client, container_id, kill_signal=None):
+        pass
+
+    @abc.abstractmethod
+    def stop_container(self, client, container_id, timeout=None):
+        pass
+
+    @abc.abstractmethod
+    def remove_container(self, client, container_id, remove_volumes=False, link=False, force=False):
+        pass
+
+    @abc.abstractmethod
+    def run(self, runner, client):
+        pass
+
+
+def _is_volume_permissions(mode):
+    """Return True if every comma-separated part of ``mode`` is a known volume mount option."""
+    for part in mode.split(','):
+        if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'):
+            return False
+    return True
+
+
+def _parse_port_range(range_or_port, module):
+    '''
+    Parses a string containing either a single port or a range of ports.
+
+    Returns a list of integers for each port in the range (inclusive).
+    Calls module.fail_json() on malformed input.
+    '''
+    if '-' in range_or_port:
+        try:
+            start, end = [int(port) for port in range_or_port.split('-')]
+        except Exception:
+            module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port))
+        if end < start:
+            module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port))
+        return list(range(start, end + 1))
+    else:
+        try:
+            return [int(range_or_port)]
+        except Exception:
+            module.fail_json(msg='Invalid port: "{0}"'.format(range_or_port))
+
+
+def _split_colon_ipv6(text, module):
+    '''
+    Split string by ':', while keeping IPv6 addresses in square brackets in one component.
+    '''
+    # Fast path: no brackets means a plain split suffices.
+    if '[' not in text:
+        return text.split(':')
+    start = 0
+    result = []
+    while start < len(text):
+        i = text.find('[', start)
+        if i < 0:
+            # No further brackets; split the remainder normally.
+            result.extend(text[start:].split(':'))
+            break
+        j = text.find(']', i)
+        if j < 0:
+            module.fail_json(msg='Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1))
+        # Split everything before the bracketed address, then append the
+        # bracketed part (up to the next ':') to the last component.
+        result.extend(text[start:i].split(':'))
+        k = text.find(':', j)
+        if k < 0:
+            result[-1] += text[i:]
+            start = len(text)
+        else:
+            result[-1] += text[i:k]
+            if k == len(text):
+                result.append('')
+                break
+            start = k + 1
+    return result
+
+
+def _preprocess_command(module, values):
+    """Normalize the ``command`` option into a list of text tokens.
+
+    Behavior depends on module.params['command_handling']: 'correct' preserves
+    list elements as-is; the legacy branch re-joins and re-splits lists via shlex.
+    """
+    if 'command' not in values:
+        return values
+    value = values['command']
+    if module.params['command_handling'] == 'correct':
+        if value is not None:
+            if not isinstance(value, list):
+                # convert from str to list
+                value = shlex.split(to_text(value, errors='surrogate_or_strict'))
+            value = [to_text(x, errors='surrogate_or_strict') for x in value]
+    elif value:
+        # convert from list to str
+        if isinstance(value, list):
+            value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value]))
+            value = [to_text(x, errors='surrogate_or_strict') for x in value]
+        else:
+            value = shlex.split(to_text(value, errors='surrogate_or_strict'))
+            value = [to_text(x, errors='surrogate_or_strict') for x in value]
+    else:
+        # Legacy handling with an empty/None command: drop the option entirely.
+        return {}
+    return {
+        'command': value,
+    }
+
+
+def _preprocess_entrypoint(module, values):
+    """Normalize the ``entrypoint`` option into a list of text tokens (see _preprocess_command)."""
+    if 'entrypoint' not in values:
+        return values
+    value = values['entrypoint']
+    if module.params['command_handling'] == 'correct':
+        if value is not None:
+            value = [to_text(x, errors='surrogate_or_strict') for x in value]
+    elif value:
+        # convert from list to str.
+        value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value]))
+        value = [to_text(x, errors='surrogate_or_strict') for x in value]
+    else:
+        # Legacy handling with an empty/None entrypoint: drop the option entirely.
+        return {}
+    return {
+        'entrypoint': value,
+    }
+
+
def _preprocess_env(module, values):
    # Merge env_file and env into a single list of 'KEY=VALUE' strings.
    # Entries from 'env' override same-named entries from 'env_file'
    # (env_file is applied first, env second). Non-string env values are
    # rejected with fail_json to avoid YAML having silently converted
    # ambiguous values (e.g. booleans or numbers).
    if not values:
        return {}
    final_env = {}
    if 'env_file' in values:
        parsed_env_file = parse_env_file(values['env_file'])
        for name, value in parsed_env_file.items():
            final_env[name] = to_text(value, errors='surrogate_or_strict')
    if 'env' in values:
        for name, value in values['env'].items():
            if not isinstance(value, string_types):
                module.fail_json(msg='Non-string value found for env option. Ambiguous env options must be '
                                     'wrapped in quotes to avoid them being interpreted. Key: %s' % (name, ))
            final_env[name] = to_text(value, errors='surrogate_or_strict')
    formatted_env = []
    for key, value in final_env.items():
        formatted_env.append('%s=%s' % (key, value))
    return {
        'env': formatted_env,
    }
+
+
def _preprocess_healthcheck(module, values):
    '''Normalize the healthcheck option (without normalizing the test command).'''
    if not values:
        return {}
    healthcheck = normalize_healthcheck(values['healthcheck'], normalize_test=False)
    return {'healthcheck': healthcheck}
+
+
def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None):
    # Convert the human-readable size stored in values[name] (e.g. '1G') to
    # an integer number of bytes, in place. If unlimited_value is given, the
    # literal string 'unlimited' (or the stringified unlimited_value) maps to
    # unlimited_value itself. Calls module.fail_json() when the value cannot
    # be parsed.
    if name not in values:
        return values
    try:
        value = values[name]
        if unlimited_value is not None and value in ('unlimited', str(unlimited_value)):
            value = unlimited_value
        else:
            value = human_to_bytes(value)
        values[name] = value
        return values
    except ValueError as exc:
        module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc)))
+
+
+def _preprocess_mac_address(module, values):
+ if 'mac_address' not in values:
+ return values
+ return {
+ 'mac_address': values['mac_address'].replace('-', ':'),
+ }
+
+
+def _preprocess_networks(module, values):
+ if module.params['networks_cli_compatible'] is True and values.get('networks') and 'network_mode' not in values:
+ # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode
+ # (assuming no explicit value is specified for network_mode)
+ values['network_mode'] = values['networks'][0]['name']
+
+ if 'networks' in values:
+ for network in values['networks']:
+ if network['links']:
+ parsed_links = []
+ for link in network['links']:
+ parsed_link = link.split(':', 1)
+ if len(parsed_link) == 1:
+ parsed_link = (link, link)
+ parsed_links.append(tuple(parsed_link))
+ network['links'] = parsed_links
+
+ return values
+
+
def _preprocess_sysctls(module, values):
    # Coerce all sysctl values to text in place, since YAML may have parsed
    # them as integers or booleans while the API expects strings.
    if 'sysctls' in values:
        for key, value in values['sysctls'].items():
            values['sysctls'][key] = to_text(value, errors='surrogate_or_strict')
    return values
+
+
+def _preprocess_tmpfs(module, values):
+ if 'tmpfs' not in values:
+ return values
+ result = {}
+ for tmpfs_spec in values['tmpfs']:
+ split_spec = tmpfs_spec.split(":", 1)
+ if len(split_spec) > 1:
+ result[split_spec[0]] = split_spec[1]
+ else:
+ result[split_spec[0]] = ""
+ return {
+ 'tmpfs': result
+ }
+
+
+def _preprocess_ulimits(module, values):
+ if 'ulimits' not in values:
+ return values
+ result = []
+ for limit in values['ulimits']:
+ limits = dict()
+ pieces = limit.split(':')
+ if len(pieces) >= 2:
+ limits['Name'] = pieces[0]
+ limits['Soft'] = int(pieces[1])
+ limits['Hard'] = int(pieces[1])
+ if len(pieces) == 3:
+ limits['Hard'] = int(pieces[2])
+ result.append(limits)
+ return {
+ 'ulimits': result,
+ }
+
+
def _preprocess_mounts(module, values):
    # Validate and normalize the mounts/volumes options, and derive
    # values['volume_binds'] from the volumes that carry a host path.
    #
    # Fails if the same container path is mounted more than once across the
    # mounts and volumes options.
    last = dict()

    def check_collision(t, name):
        # Track every container path seen so far; fail on a duplicate,
        # reporting whether it collided within the same option or across two.
        if t in last:
            if name == last[t]:
                module.fail_json(msg='The mount point "{0}" appears twice in the {1} option'.format(t, name))
            else:
                module.fail_json(msg='The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
        last[t] = name

    if 'mounts' in values:
        mounts = []
        for mount in values['mounts']:
            target = mount['target']
            mount_type = mount['type']

            check_collision(target, 'mounts')

            mount_dict = dict(mount)

            # Sanity checks
            if mount['source'] is None and mount_type not in ('tmpfs', 'volume'):
                module.fail_json(msg='source must be specified for mount "{0}" of type "{1}"'.format(target, mount_type))
            # Type-specific suboptions (see _MOUNT_OPTION_TYPES) may only be
            # used with their matching mount type.
            for option, req_mount_type in _MOUNT_OPTION_TYPES.items():
                if mount[option] is not None and mount_type != req_mount_type:
                    module.fail_json(
                        msg='{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, mount_type, req_mount_type)
                    )

            # Streamline options
            volume_options = mount_dict.pop('volume_options')
            if mount_dict['volume_driver'] and volume_options:
                mount_dict['volume_options'] = clean_dict_booleans_for_docker_api(volume_options)
            if mount_dict['labels']:
                mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels'])
            if mount_dict['tmpfs_size'] is not None:
                try:
                    mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size'])
                except ValueError as exc:
                    module.fail_json(msg='Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, to_native(exc)))
            if mount_dict['tmpfs_mode'] is not None:
                try:
                    # tmpfs_mode is an octal permission string like '1777'.
                    mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8)
                except Exception as dummy:
                    module.fail_json(msg='tmp_fs mode of mount "{0}" is not an octal string!'.format(target))

            # Add result to list
            mounts.append(omit_none_from_dict(mount_dict))
        values['mounts'] = mounts
    if 'volumes' in values:
        new_vols = []
        for vol in values['volumes']:
            parts = vol.split(':')
            if ':' in vol:
                if len(parts) == 3:
                    # host:container:mode — expand ~ / relative host paths.
                    host, container, mode = parts
                    if not _is_volume_permissions(mode):
                        module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode))
                    if re.match(r'[.~]', host):
                        host = os.path.abspath(os.path.expanduser(host))
                    check_collision(container, 'volumes')
                    new_vols.append("%s:%s:%s" % (host, container, mode))
                    continue
                elif len(parts) == 2:
                    # host:container (only when part 2 is not a mode) — default to rw.
                    if not _is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]):
                        host = os.path.abspath(os.path.expanduser(parts[0]))
                        check_collision(parts[1], 'volumes')
                        new_vols.append("%s:%s:rw" % (host, parts[1]))
                        continue
            # Anonymous volume or already-normalized spec: the container path
            # is parts[1] when present, else parts[0].
            check_collision(parts[min(1, len(parts) - 1)], 'volumes')
            new_vols.append(vol)
        values['volumes'] = new_vols
        # Extract the host-bound volumes into volume_binds.
        new_binds = []
        for vol in new_vols:
            host = None
            if ':' in vol:
                parts = vol.split(':')
                if len(parts) == 3:
                    host, container, mode = parts
                    if not _is_volume_permissions(mode):
                        module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode))
                elif len(parts) == 2:
                    if not _is_volume_permissions(parts[1]):
                        host, container, mode = (parts + ['rw'])
            if host is not None:
                new_binds.append('%s:%s:%s' % (host, container, mode))
        values['volume_binds'] = new_binds
    return values
+
+
def _preprocess_log(module, values):
    # Normalize log_driver/log_options. If no log_driver is given, everything
    # is dropped (the argument spec requires log_driver whenever log_options
    # is set). Non-string option values are coerced to text with a warning,
    # since the daemon expects strings.
    result = {}
    if 'log_driver' not in values:
        return result
    result['log_driver'] = values['log_driver']
    if 'log_options' in values:
        options = {}
        for k, v in values['log_options'].items():
            if not isinstance(v, string_types):
                module.warn(
                    "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
                    "If this is not correct, or you want to avoid such warnings, please quote the value." % (
                        k, to_text(v, errors='surrogate_or_strict'))
                )
            v = to_text(v, errors='surrogate_or_strict')
            options[k] = v
        result['log_options'] = options
    return result
+
+
def _preprocess_ports(module, values):
    # Normalize published_ports into the port-binding mapping expected by the
    # Docker API, and compute the final list of exposed (port, protocol)
    # pairs in values['ports']. Every published port is also exposed.
    if 'published_ports' in values:
        if 'all' in values['published_ports']:
            module.fail_json(
                msg='Specifying "all" in published_ports is no longer allowed. Set publish_all_ports to "true" instead '
                'to randomly assign port mappings for those not specified by published_ports.')

        binds = {}
        for port in values['published_ports']:
            # A spec has 1-3 colon-separated parts:
            #   container_port | host_port:container_port | ip:host_port:container_port
            # IPv6 bind addresses must be bracketed so their colons survive.
            parts = _split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), module)
            container_port = parts[-1]
            protocol = ''
            if '/' in container_port:
                container_port, protocol = parts[-1].split('/')
            # container_port may be a range like '8000-8010'.
            container_ports = _parse_port_range(container_port, module)

            p_len = len(parts)
            if p_len == 1:
                # No bind IP / host port: mark with the default-IP placeholder.
                port_binds = len(container_ports) * [(_DEFAULT_IP_REPLACEMENT_STRING, )]
            elif p_len == 2:
                if len(container_ports) == 1:
                    port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, parts[0])]
                else:
                    # Range-to-range mapping: pair host range with container range.
                    port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, port) for port in _parse_port_range(parts[0], module)]
            elif p_len == 3:
                # We only allow IPv4 and IPv6 addresses for the bind address
                ipaddr = parts[0]
                if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr):
                    module.fail_json(
                        msg='Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
                        'Use the dig lookup to resolve hostnames. (Found hostname: {0})'.format(ipaddr)
                    )
                if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
                    # Strip the square brackets from a plain IPv6 address.
                    ipaddr = ipaddr[1:-1]
                if parts[1]:
                    if len(container_ports) == 1:
                        port_binds = [(ipaddr, parts[1])]
                    else:
                        port_binds = [(ipaddr, port) for port in _parse_port_range(parts[1], module)]
                else:
                    # ip::container_port — bind IP without explicit host port.
                    port_binds = len(container_ports) * [(ipaddr,)]
            else:
                module.fail_json(
                    msg='Invalid port description "%s" - expected 1 to 3 colon-separated parts, but got %d. '
                    'Maybe you forgot to use square brackets ([...]) around an IPv6 address?' % (port, p_len)
                )

            for bind, container_port in zip(port_binds, container_ports):
                idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port
                if idx in binds:
                    # Multiple bindings for the same container port become a list.
                    old_bind = binds[idx]
                    if isinstance(old_bind, list):
                        old_bind.append(bind)
                    else:
                        binds[idx] = [old_bind, bind]
                else:
                    binds[idx] = bind
        values['published_ports'] = binds

    exposed = []
    if 'exposed_ports' in values:
        for port in values['exposed_ports']:
            port = to_text(port, errors='surrogate_or_strict').strip()
            protocol = 'tcp'
            match = re.search(r'(/.+$)', port)
            if match:
                protocol = match.group(1).replace('/', '')
                port = re.sub(r'/.+$', '', port)
            exposed.append((port, protocol))
    if 'published_ports' in values:
        # Any published port should also be exposed
        for publish_port in values['published_ports']:
            match = False
            if isinstance(publish_port, string_types) and '/' in publish_port:
                port, protocol = publish_port.split('/')
                port = int(port)
            else:
                protocol = 'tcp'
                port = int(publish_port)
            for exposed_port in exposed:
                if exposed_port[1] != protocol:
                    continue
                if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
                    # Exposed port range: check containment.
                    start_port, end_port = exposed_port[0].split('-')
                    if int(start_port) <= port <= int(end_port):
                        match = True
                elif exposed_port[0] == port:
                    match = True
            if not match:
                exposed.append((port, protocol))
    values['ports'] = exposed
    return values
+
+
# ---------------------------------------------------------------------------
# Option groups: each OptionGroup declares one or more related module options,
# optionally with a preprocess callback that normalizes user-supplied values
# (see the _preprocess_* helpers above) before they are used.
# ---------------------------------------------------------------------------
OPTION_AUTO_REMOVE = (
    OptionGroup()
    .add_option('auto_remove', type='bool')
)

OPTION_BLKIO_WEIGHT = (
    OptionGroup()
    .add_option('blkio_weight', type='int')
)

OPTION_CAPABILITIES = (
    OptionGroup()
    .add_option('capabilities', type='set', elements='str')
)

OPTION_CAP_DROP = (
    OptionGroup()
    .add_option('cap_drop', type='set', elements='str')
)

OPTION_CGROUP_NS_MODE = (
    OptionGroup()
    .add_option('cgroupns_mode', type='str', ansible_choices=['private', 'host'])
)

OPTION_CGROUP_PARENT = (
    OptionGroup()
    .add_option('cgroup_parent', type='str')
)

OPTION_COMMAND = (
    OptionGroup(preprocess=_preprocess_command)
    .add_option('command', type='list', elements='str', ansible_type='raw')
)

OPTION_CPU_PERIOD = (
    OptionGroup()
    .add_option('cpu_period', type='int')
)

OPTION_CPU_QUOTA = (
    OptionGroup()
    .add_option('cpu_quota', type='int')
)

OPTION_CPUSET_CPUS = (
    OptionGroup()
    .add_option('cpuset_cpus', type='str')
)

OPTION_CPUSET_MEMS = (
    OptionGroup()
    .add_option('cpuset_mems', type='str')
)

OPTION_CPU_SHARES = (
    OptionGroup()
    .add_option('cpu_shares', type='int')
)

OPTION_ENTRYPOINT = (
    OptionGroup(preprocess=_preprocess_entrypoint)
    .add_option('entrypoint', type='list', elements='str')
)

OPTION_CPUS = (
    OptionGroup()
    .add_option('cpus', type='int', ansible_type='float')
)

OPTION_DETACH_INTERACTIVE = (
    OptionGroup()
    .add_option('detach', type='bool')
    .add_option('interactive', type='bool')
)

OPTION_DEVICES = (
    OptionGroup()
    .add_option('devices', type='set', elements='dict', ansible_elements='str')
)

OPTION_DEVICE_READ_BPS = (
    OptionGroup()
    .add_option('device_read_bps', type='set', elements='dict', ansible_suboptions=dict(
        path=dict(required=True, type='str'),
        rate=dict(required=True, type='str'),
    ))
)

OPTION_DEVICE_WRITE_BPS = (
    OptionGroup()
    .add_option('device_write_bps', type='set', elements='dict', ansible_suboptions=dict(
        path=dict(required=True, type='str'),
        rate=dict(required=True, type='str'),
    ))
)

OPTION_DEVICE_READ_IOPS = (
    OptionGroup()
    .add_option('device_read_iops', type='set', elements='dict', ansible_suboptions=dict(
        path=dict(required=True, type='str'),
        rate=dict(required=True, type='int'),
    ))
)

OPTION_DEVICE_WRITE_IOPS = (
    OptionGroup()
    .add_option('device_write_iops', type='set', elements='dict', ansible_suboptions=dict(
        path=dict(required=True, type='str'),
        rate=dict(required=True, type='int'),
    ))
)

OPTION_DEVICE_REQUESTS = (
    OptionGroup()
    .add_option('device_requests', type='set', elements='dict', ansible_suboptions=dict(
        capabilities=dict(type='list', elements='list'),
        count=dict(type='int'),
        device_ids=dict(type='list', elements='str'),
        driver=dict(type='str'),
        options=dict(type='dict'),
    ))
)

OPTION_DNS_SERVERS = (
    OptionGroup()
    .add_option('dns_servers', type='list', elements='str')
)

OPTION_DNS_OPTS = (
    OptionGroup()
    .add_option('dns_opts', type='set', elements='str')
)

OPTION_DNS_SEARCH_DOMAINS = (
    OptionGroup()
    .add_option('dns_search_domains', type='list', elements='str')
)

OPTION_DOMAINNAME = (
    OptionGroup()
    .add_option('domainname', type='str')
)

OPTION_ENVIRONMENT = (
    OptionGroup(preprocess=_preprocess_env)
    .add_option('env', type='set', ansible_type='dict', elements='str', needs_no_suboptions=True)
    .add_option('env_file', type='set', ansible_type='path', elements='str', not_a_container_option=True)
)

OPTION_ETC_HOSTS = (
    OptionGroup()
    .add_option('etc_hosts', type='set', ansible_type='dict', elements='str', needs_no_suboptions=True)
)

OPTION_GROUPS = (
    OptionGroup()
    .add_option('groups', type='set', elements='str')
)

OPTION_HEALTHCHECK = (
    OptionGroup(preprocess=_preprocess_healthcheck)
    .add_option('healthcheck', type='dict', ansible_suboptions=dict(
        test=dict(type='raw'),
        interval=dict(type='str'),
        timeout=dict(type='str'),
        start_period=dict(type='str'),
        retries=dict(type='int'),
    ))
)

OPTION_HOSTNAME = (
    OptionGroup()
    .add_option('hostname', type='str')
)

# NOTE(review): OPTION_IMAGE reuses _preprocess_networks as its preprocess
# callback; that callback is a no-op for a values dict that contains only
# 'image', but confirm this is intentional rather than a copy/paste.
OPTION_IMAGE = (
    OptionGroup(preprocess=_preprocess_networks)
    .add_option('image', type='str')
)

OPTION_INIT = (
    OptionGroup()
    .add_option('init', type='bool')
)

OPTION_IPC_MODE = (
    OptionGroup()
    .add_option('ipc_mode', type='str')
)

OPTION_KERNEL_MEMORY = (
    OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='kernel_memory'))
    .add_option('kernel_memory', type='int', ansible_type='str')
)

OPTION_LABELS = (
    OptionGroup()
    .add_option('labels', type='dict', needs_no_suboptions=True)
)

OPTION_LINKS = (
    OptionGroup()
    .add_option('links', type='set', elements='list', ansible_elements='str')
)

OPTION_LOG_DRIVER_OPTIONS = (
    OptionGroup(preprocess=_preprocess_log, ansible_required_by={'log_options': ['log_driver']})
    .add_option('log_driver', type='str')
    .add_option('log_options', type='dict', ansible_aliases=['log_opt'], needs_no_suboptions=True)
)

OPTION_MAC_ADDRESS = (
    OptionGroup(preprocess=_preprocess_mac_address)
    .add_option('mac_address', type='str')
)

OPTION_MEMORY = (
    OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory'))
    .add_option('memory', type='int', ansible_type='str')
)

OPTION_MEMORY_RESERVATION = (
    OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_reservation'))
    .add_option('memory_reservation', type='int', ansible_type='str')
)

OPTION_MEMORY_SWAP = (
    # 'unlimited' (or '-1') maps to -1 per the Docker API convention.
    OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_swap', unlimited_value=-1))
    .add_option('memory_swap', type='int', ansible_type='str')
)

OPTION_MEMORY_SWAPPINESS = (
    OptionGroup()
    .add_option('memory_swappiness', type='int')
)

OPTION_STOP_TIMEOUT = (
    OptionGroup()
    .add_option('stop_timeout', type='int', default_comparison='ignore')
)

OPTION_NETWORK = (
    OptionGroup(preprocess=_preprocess_networks)
    .add_option('network_mode', type='str')
    .add_option('networks', type='set', elements='dict', ansible_suboptions=dict(
        name=dict(type='str', required=True),
        ipv4_address=dict(type='str'),
        ipv6_address=dict(type='str'),
        aliases=dict(type='list', elements='str'),
        links=dict(type='list', elements='str'),
    ))
)

OPTION_OOM_KILLER = (
    OptionGroup()
    .add_option('oom_killer', type='bool')
)

OPTION_OOM_SCORE_ADJ = (
    OptionGroup()
    .add_option('oom_score_adj', type='int')
)

OPTION_PID_MODE = (
    OptionGroup()
    .add_option('pid_mode', type='str')
)

OPTION_PIDS_LIMIT = (
    OptionGroup()
    .add_option('pids_limit', type='int')
)

OPTION_PLATFORM = (
    OptionGroup()
    .add_option('platform', type='str')
)

OPTION_PRIVILEGED = (
    OptionGroup()
    .add_option('privileged', type='bool')
)

OPTION_READ_ONLY = (
    OptionGroup()
    .add_option('read_only', type='bool')
)

OPTION_RESTART_POLICY = (
    OptionGroup(ansible_required_by={'restart_retries': ['restart_policy']})
    .add_option('restart_policy', type='str', ansible_choices=['no', 'on-failure', 'always', 'unless-stopped'])
    .add_option('restart_retries', type='int')
)

OPTION_RUNTIME = (
    OptionGroup()
    .add_option('runtime', type='str')
)

OPTION_SECURITY_OPTS = (
    OptionGroup()
    .add_option('security_opts', type='set', elements='str')
)

OPTION_SHM_SIZE = (
    OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='shm_size'))
    .add_option('shm_size', type='int', ansible_type='str')
)

OPTION_STOP_SIGNAL = (
    OptionGroup()
    .add_option('stop_signal', type='str')
)

OPTION_STORAGE_OPTS = (
    OptionGroup()
    .add_option('storage_opts', type='dict', needs_no_suboptions=True)
)

OPTION_SYSCTLS = (
    OptionGroup(preprocess=_preprocess_sysctls)
    .add_option('sysctls', type='dict', needs_no_suboptions=True)
)

OPTION_TMPFS = (
    OptionGroup(preprocess=_preprocess_tmpfs)
    .add_option('tmpfs', type='dict', ansible_type='list', ansible_elements='str')
)

OPTION_TTY = (
    OptionGroup()
    .add_option('tty', type='bool')
)

OPTION_ULIMITS = (
    OptionGroup(preprocess=_preprocess_ulimits)
    .add_option('ulimits', type='set', elements='dict', ansible_elements='str')
)

OPTION_USER = (
    OptionGroup()
    .add_option('user', type='str')
)

OPTION_USERNS_MODE = (
    OptionGroup()
    .add_option('userns_mode', type='str')
)

OPTION_UTS = (
    OptionGroup()
    .add_option('uts', type='str')
)

OPTION_VOLUME_DRIVER = (
    OptionGroup()
    .add_option('volume_driver', type='str')
)

OPTION_VOLUMES_FROM = (
    OptionGroup()
    .add_option('volumes_from', type='set', elements='str')
)

OPTION_WORKING_DIR = (
    OptionGroup()
    .add_option('working_dir', type='str')
)

OPTION_MOUNTS_VOLUMES = (
    OptionGroup(preprocess=_preprocess_mounts)
    .add_option('mounts', type='set', elements='dict', ansible_suboptions=dict(
        target=dict(type='str', required=True),
        source=dict(type='str'),
        type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'),
        read_only=dict(type='bool'),
        consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']),
        propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']),
        no_copy=dict(type='bool'),
        labels=dict(type='dict'),
        volume_driver=dict(type='str'),
        volume_options=dict(type='dict'),
        tmpfs_size=dict(type='str'),
        tmpfs_mode=dict(type='str'),
    ))
    .add_option('volumes', type='set', elements='str')
    # volume_binds is derived by _preprocess_mounts and never exposed to users.
    .add_option('volume_binds', type='set', elements='str', not_an_ansible_option=True, copy_comparison_from='volumes')
)

OPTION_PORTS = (
    OptionGroup(preprocess=_preprocess_ports)
    .add_option('exposed_ports', type='set', elements='str', ansible_aliases=['exposed', 'expose'])
    .add_option('publish_all_ports', type='bool')
    .add_option('published_ports', type='dict', ansible_type='list', ansible_elements='str', ansible_aliases=['ports'])
    # 'ports' is computed by _preprocess_ports; not a user-facing option.
    .add_option('ports', type='set', elements='str', not_an_ansible_option=True, default_comparison='ignore')
)

# All option groups handled by the docker_container module; engine drivers
# iterate this list to build their argument spec.
OPTIONS = [
    OPTION_AUTO_REMOVE,
    OPTION_BLKIO_WEIGHT,
    OPTION_CAPABILITIES,
    OPTION_CAP_DROP,
    OPTION_CGROUP_NS_MODE,
    OPTION_CGROUP_PARENT,
    OPTION_COMMAND,
    OPTION_CPU_PERIOD,
    OPTION_CPU_QUOTA,
    OPTION_CPUSET_CPUS,
    OPTION_CPUSET_MEMS,
    OPTION_CPU_SHARES,
    OPTION_ENTRYPOINT,
    OPTION_CPUS,
    OPTION_DETACH_INTERACTIVE,
    OPTION_DEVICES,
    OPTION_DEVICE_READ_BPS,
    OPTION_DEVICE_WRITE_BPS,
    OPTION_DEVICE_READ_IOPS,
    OPTION_DEVICE_WRITE_IOPS,
    OPTION_DEVICE_REQUESTS,
    OPTION_DNS_SERVERS,
    OPTION_DNS_OPTS,
    OPTION_DNS_SEARCH_DOMAINS,
    OPTION_DOMAINNAME,
    OPTION_ENVIRONMENT,
    OPTION_ETC_HOSTS,
    OPTION_GROUPS,
    OPTION_HEALTHCHECK,
    OPTION_HOSTNAME,
    OPTION_IMAGE,
    OPTION_INIT,
    OPTION_IPC_MODE,
    OPTION_KERNEL_MEMORY,
    OPTION_LABELS,
    OPTION_LINKS,
    OPTION_LOG_DRIVER_OPTIONS,
    OPTION_MAC_ADDRESS,
    OPTION_MEMORY,
    OPTION_MEMORY_RESERVATION,
    OPTION_MEMORY_SWAP,
    OPTION_MEMORY_SWAPPINESS,
    OPTION_STOP_TIMEOUT,
    OPTION_NETWORK,
    OPTION_OOM_KILLER,
    OPTION_OOM_SCORE_ADJ,
    OPTION_PID_MODE,
    OPTION_PIDS_LIMIT,
    OPTION_PLATFORM,
    OPTION_PRIVILEGED,
    OPTION_READ_ONLY,
    OPTION_RESTART_POLICY,
    OPTION_RUNTIME,
    OPTION_SECURITY_OPTS,
    OPTION_SHM_SIZE,
    OPTION_STOP_SIGNAL,
    OPTION_STORAGE_OPTS,
    OPTION_SYSCTLS,
    OPTION_TMPFS,
    OPTION_TTY,
    OPTION_ULIMITS,
    OPTION_USER,
    OPTION_USERNS_MODE,
    OPTION_UTS,
    OPTION_VOLUME_DRIVER,
    OPTION_VOLUMES_FROM,
    OPTION_WORKING_DIR,
    OPTION_MOUNTS_VOLUMES,
    OPTION_PORTS,
]
diff --git a/ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py b/ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py
new file mode 100644
index 00000000..cccf72df
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py
@@ -0,0 +1,1353 @@
+# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.common.text.formatters import human_to_bytes
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.module_container.base import (
+ OPTION_AUTO_REMOVE,
+ OPTION_BLKIO_WEIGHT,
+ OPTION_CAPABILITIES,
+ OPTION_CAP_DROP,
+ OPTION_CGROUP_NS_MODE,
+ OPTION_CGROUP_PARENT,
+ OPTION_COMMAND,
+ OPTION_CPU_PERIOD,
+ OPTION_CPU_QUOTA,
+ OPTION_CPUSET_CPUS,
+ OPTION_CPUSET_MEMS,
+ OPTION_CPU_SHARES,
+ OPTION_ENTRYPOINT,
+ OPTION_CPUS,
+ OPTION_DETACH_INTERACTIVE,
+ OPTION_DEVICES,
+ OPTION_DEVICE_READ_BPS,
+ OPTION_DEVICE_WRITE_BPS,
+ OPTION_DEVICE_READ_IOPS,
+ OPTION_DEVICE_WRITE_IOPS,
+ OPTION_DEVICE_REQUESTS,
+ OPTION_DNS_SERVERS,
+ OPTION_DNS_OPTS,
+ OPTION_DNS_SEARCH_DOMAINS,
+ OPTION_DOMAINNAME,
+ OPTION_ENVIRONMENT,
+ OPTION_ETC_HOSTS,
+ OPTION_GROUPS,
+ OPTION_HEALTHCHECK,
+ OPTION_HOSTNAME,
+ OPTION_IMAGE,
+ OPTION_INIT,
+ OPTION_IPC_MODE,
+ OPTION_KERNEL_MEMORY,
+ OPTION_LABELS,
+ OPTION_LINKS,
+ OPTION_LOG_DRIVER_OPTIONS,
+ OPTION_MAC_ADDRESS,
+ OPTION_MEMORY,
+ OPTION_MEMORY_RESERVATION,
+ OPTION_MEMORY_SWAP,
+ OPTION_MEMORY_SWAPPINESS,
+ OPTION_STOP_TIMEOUT,
+ OPTION_NETWORK,
+ OPTION_OOM_KILLER,
+ OPTION_OOM_SCORE_ADJ,
+ OPTION_PID_MODE,
+ OPTION_PIDS_LIMIT,
+ OPTION_PLATFORM,
+ OPTION_PRIVILEGED,
+ OPTION_READ_ONLY,
+ OPTION_RESTART_POLICY,
+ OPTION_RUNTIME,
+ OPTION_SECURITY_OPTS,
+ OPTION_SHM_SIZE,
+ OPTION_STOP_SIGNAL,
+ OPTION_STORAGE_OPTS,
+ OPTION_SYSCTLS,
+ OPTION_TMPFS,
+ OPTION_TTY,
+ OPTION_ULIMITS,
+ OPTION_USER,
+ OPTION_USERNS_MODE,
+ OPTION_UTS,
+ OPTION_VOLUME_DRIVER,
+ OPTION_VOLUMES_FROM,
+ OPTION_WORKING_DIR,
+ OPTION_MOUNTS_VOLUMES,
+ OPTION_PORTS,
+ OPTIONS,
+ Engine,
+ EngineDriver,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ normalize_healthcheck_test,
+ omit_none_from_dict,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
+ convert_port_bindings,
+ normalize_links,
+)
+
+
# Placeholder marker used for port bindings that carry no explicit bind IP;
# presumably substituted with the daemon's default IP further on — the long
# random suffix makes accidental collisions with user data practically
# impossible. TODO confirm where the substitution happens.
_DEFAULT_IP_REPLACEMENT_STRING = '[[DEFAULT_IP:iewahhaeB4Sae6Aen8IeShairoh4zeph7xaekoh8Geingunaesaeweiy3ooleiwi]]'


# Mount suboptions that are only valid for one specific mount type
# (e.g. tmpfs_size only for type=tmpfs); used for mount validation.
_MOUNT_OPTION_TYPES = dict(
    volume_driver='volume',
    volume_options='volume',
    propagation='bind',
    no_copy='volume',
    labels='volume',
    tmpfs_size='tmpfs',
    tmpfs_mode='tmpfs',
)
+
+
+def _get_ansible_type(type):
+ if type == 'set':
+ return 'list'
+ if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'):
+ raise Exception('Invalid type "%s"' % (type, ))
+ return type
+
+
# Unique sentinel object for "no value supplied" defaults where None is a
# legitimate value. NOTE(review): its usage sites are further down this file.
_SENTRY = object()
+
+
+class DockerAPIEngineDriver(EngineDriver):
+ name = 'docker_api'
+
    def setup(self, argument_spec, mutually_exclusive=None, required_together=None, required_one_of=None, required_if=None, required_by=None):
        # Build the complete argument spec from all option groups supported
        # by this engine, collect per-option minimal Docker API versions, and
        # construct the API client. Returns (module, active_options, client).
        argument_spec = argument_spec or {}
        mutually_exclusive = mutually_exclusive or []
        required_together = required_together or []
        required_one_of = required_one_of or []
        required_if = required_if or []
        required_by = required_by or {}

        active_options = []
        option_minimal_versions = {}
        for options in OPTIONS:
            if not options.supports_engine(self.name):
                continue

            # Merge this option group's Ansible argument-spec restrictions.
            mutually_exclusive.extend(options.ansible_mutually_exclusive)
            required_together.extend(options.ansible_required_together)
            required_one_of.extend(options.ansible_required_one_of)
            required_if.extend(options.ansible_required_if)
            required_by.update(options.ansible_required_by)
            argument_spec.update(options.argument_spec)

            engine = options.get_engine(self.name)
            if engine.min_api_version is not None:
                # Record the minimal API version for every user-visible
                # option of this group so the client can reject too-old daemons.
                for option in options.options:
                    if not option.not_an_ansible_option:
                        option_minimal_versions[option.name] = {'docker_api_version': engine.min_api_version}

            active_options.append(options)

        client = AnsibleDockerClient(
            argument_spec=argument_spec,
            mutually_exclusive=mutually_exclusive,
            required_together=required_together,
            required_one_of=required_one_of,
            required_if=required_if,
            required_by=required_by,
            option_minimal_versions=option_minimal_versions,
            supports_check_mode=True,
        )

        return client.module, active_options, client
+
    def get_api_version(self, client):
        # API version negotiated by the Docker API client.
        return client.docker_api_version
+
    def get_container_id(self, container):
        # Container ID from an inspect result.
        return container['Id']
+
    def get_image_from_container(self, container):
        # Image reference (ID) from an inspect result.
        return container['Image']
+
    def get_image_name_from_container(self, container):
        # Image name the container was created with; may be absent.
        return container['Config'].get('Image')
+
+ def is_container_removing(self, container):
+ if container.get('State'):
+ return container['State'].get('Status') == 'removing'
+ return False
+
+ def is_container_running(self, container):
+ if container.get('State'):
+ if container['State'].get('Running') and not container['State'].get('Ghost', False):
+ return True
+ return False
+
+ def is_container_paused(self, container):
+ if container.get('State'):
+ return container['State'].get('Paused', False)
+ return False
+
    def inspect_container_by_name(self, client, container_name):
        # Inspect a container by name via the API client.
        return client.get_container(container_name)
+
    def inspect_container_by_id(self, client, container_id):
        # Inspect a container by ID via the API client.
        return client.get_container_by_id(container_id)
+
    def inspect_image_by_id(self, client, image_id):
        # Look up an image by ID via the API client.
        return client.find_image_by_id(image_id)
+
    def inspect_image_by_name(self, client, repository, tag):
        # Look up an image by repository and tag via the API client.
        return client.find_image(repository, tag)
+
    def pull_image(self, client, repository, tag, platform=None):
        # Pull an image, optionally restricted to a platform.
        return client.pull_image(repository, tag, platform=platform)
+
    def pause_container(self, client, container_id):
        # POST /containers/{id}/pause
        client.post_call('/containers/{0}/pause', container_id)
+
    def unpause_container(self, client, container_id):
        # POST /containers/{id}/unpause
        client.post_call('/containers/{0}/unpause', container_id)
+
    def disconnect_container_from_network(self, client, container_id, network_id):
        # POST /networks/{id}/disconnect with the container in the payload.
        client.post_json('/networks/{0}/disconnect', network_id, data={'Container': container_id})
+
    def connect_container_to_network(self, client, container_id, network_id, parameters=None):
        # Translate snake_case connection parameters into the payload for
        # POST /networks/{id}/connect. Raises on unknown parameters.
        parameters = (parameters or {}).copy()
        params = {}
        for para, dest_para in {'ipv4_address': 'IPv4Address', 'ipv6_address': 'IPv6Address', 'links': 'Links', 'aliases': 'Aliases'}.items():
            value = parameters.pop(para, None)
            if value:
                if para == 'links':
                    value = normalize_links(value)
                params[dest_para] = value
        if parameters:
            # Anything left over was not recognized above.
            raise Exception(
                'Unknown parameter(s) for connect_container_to_network for Docker API driver: %s' % (', '.join(['"%s"' % p for p in sorted(parameters)])))
        # IP addresses belong in EndpointConfig.IPAMConfig, not directly in
        # EndpointConfig.
        ipam_config = {}
        for param in ('IPv4Address', 'IPv6Address'):
            if param in params:
                ipam_config[param] = params.pop(param)
        if ipam_config:
            params['IPAMConfig'] = ipam_config
        data = {
            'Container': container_id,
            'EndpointConfig': params,
        }
        client.post_json('/networks/{0}/connect', network_id, data=data)
+
    def create_container(self, client, container_name, create_parameters):
        # POST /containers/create; 'platform' travels as a query parameter,
        # not in the JSON body. Returns the new container's ID.
        params = {'name': container_name}
        if 'platform' in create_parameters:
            params['platform'] = create_parameters.pop('platform')
        new_container = client.post_json_to_json('/containers/create', data=create_parameters, params=params)
        client.report_warnings(new_container)
        return new_container['Id']
+
    def start_container(self, client, container_id):
        # POST /containers/{id}/start
        client.post_json('/containers/{0}/start', container_id)
+
    def wait_for_container(self, client, container_id, timeout=None):
        # Block until the container exits; returns its exit status code.
        return client.post_json_to_json('/containers/{0}/wait', container_id, timeout=timeout)['StatusCode']
+
    def get_container_output(self, client, container_id):
        # Fetch the container's logs if the logging driver supports reading
        # them back. Returns (output, is_real_output).
        config = client.get_json('/containers/{0}/json', container_id)
        logging_driver = config['HostConfig']['LogConfig']['Type']
        if logging_driver in ('json-file', 'journald', 'local'):
            params = {
                'stderr': 1,
                'stdout': 1,
                'timestamps': 0,
                'follow': 0,
                'tail': 'all',
            }
            res = client._get(client._url('/containers/{0}/logs', container_id), params=params)
            # Demultiplex stdout/stderr unless the container allocated a TTY.
            output = client._get_result_tty(False, res, config['Config']['Tty'])
            return output, True
        else:
            # Other log drivers do not support retrieval through the API.
            return "Result logged using `%s` driver" % logging_driver, False
+
    def update_container(self, client, container_id, update_parameters):
        # POST /containers/{id}/update; surfaces daemon warnings to the user.
        result = client.post_json_to_json('/containers/{0}/update', container_id, data=update_parameters)
        client.report_warnings(result)
+
    def restart_container(self, client, container_id, timeout=None):
        # POST /containers/{id}/restart. The HTTP client timeout is extended
        # by the daemon-side stop timeout (default 10s) so the request does
        # not time out before the daemon finishes.
        client_timeout = client.timeout
        if client_timeout is not None:
            client_timeout += timeout or 10
        client.post_call('/containers/{0}/restart', container_id, params={'t': timeout}, timeout=client_timeout)
+
    def kill_container(self, client, container_id, kill_signal=None):
        # POST /containers/{id}/kill, optionally with a specific signal.
        params = {}
        if kill_signal is not None:
            params['signal'] = kill_signal
        client.post_call('/containers/{0}/kill', container_id, params=params)
+
    def stop_container(self, client, container_id, timeout=None):
        # Stop a container. When no timeout is given, the daemon default of
        # 10 seconds is still used to pad the client-side HTTP timeout so the
        # request outlives the daemon's grace period. If the daemon refuses
        # because the container is paused, unpause and retry (max 3 times).
        if timeout:
            params = {'t': timeout}
        else:
            params = {}
            timeout = 10
        client_timeout = client.timeout
        if client_timeout is not None:
            client_timeout += timeout
        count = 0
        while True:
            try:
                client.post_call('/containers/{0}/stop', container_id, params=params, timeout=client_timeout)
            except APIError as exc:
                if 'Unpause the container before stopping or killing' in exc.explanation:
                    # New docker daemon versions do not allow containers to be removed
                    # if they are paused. Make sure we don't end up in an infinite loop.
                    if count == 3:
                        raise Exception('%s [tried to unpause three times]' % to_native(exc))
                    count += 1
                    # Unpause
                    try:
                        self.unpause_container(client, container_id)
                    except Exception as exc2:
                        raise Exception('%s [while unpausing]' % to_native(exc2))
                    # Now try again
                    continue
                raise
            # We only loop when explicitly requested by 'continue'
            break
+
    def remove_container(self, client, container_id, remove_volumes=False, link=False, force=False):
        # Delete a container, optionally removing its anonymous volumes,
        # removing it as a link, or force-killing it first. Tolerates the
        # container being gone already or a removal already in progress, and
        # retries after unpausing when the daemon refuses a paused container.
        params = {'v': remove_volumes, 'link': link, 'force': force}
        count = 0
        while True:
            try:
                client.delete_call('/containers/{0}', container_id, params=params)
            except NotFound as dummy:
                # Already gone - treat as success.
                pass
            except APIError as exc:
                if 'Unpause the container before stopping or killing' in exc.explanation:
                    # New docker daemon versions do not allow containers to be removed
                    # if they are paused. Make sure we don't end up in an infinite loop.
                    if count == 3:
                        raise Exception('%s [tried to unpause three times]' % to_native(exc))
                    count += 1
                    # Unpause
                    try:
                        self.unpause_container(client, container_id)
                    except Exception as exc2:
                        raise Exception('%s [while unpausing]' % to_native(exc2))
                    # Now try again
                    continue
                if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
                    # Someone else is already removing it - nothing left to do.
                    pass
                else:
                    raise
            # We only loop when explicitly requested by 'continue'
            break
+
    def run(self, runner, client):
        # Execute 'runner', translating low-level Docker SDK/API errors and
        # HTTP-level errors into module failures with the traceback attached.
        try:
            runner()
        except DockerException as e:
            client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
        except RequestException as e:
            client.fail(
                'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
                exception=traceback.format_exc())
+
+
class DockerAPIEngine(Engine):
    """Engine that reconciles container options directly against the Docker HTTP API.

    Each engine instance describes one option (or a small group of options)
    through callbacks: how to read the current value from an inspected
    container, how to compute the expected value, how to write it into a
    create payload, and optionally how to send it in a container update.
    Missing callbacks default to pass-through / no-op behavior.
    """

    def __init__(
        self,
        get_value,
        preprocess_value=None,
        get_expected_values=None,
        ignore_mismatching_result=None,
        set_value=None,
        update_value=None,
        can_set_value=None,
        can_update_value=None,
        min_api_version=None,
    ):
        # Minimal daemon API version required for this option, both as the
        # raw string and as a comparable version object.
        self.min_api_version = min_api_version
        self.min_api_version_obj = None if min_api_version is None else LooseVersion(min_api_version)
        self.get_value = get_value
        self.set_value = set_value
        # Defaults: pass values through unchanged, and never ignore mismatches.
        self.get_expected_values = get_expected_values or (lambda module, client, api_version, options, image, values: values)
        self.ignore_mismatching_result = ignore_mismatching_result or \
            (lambda module, client, api_version, option, image, container_value, expected_value: False)
        self.preprocess_value = preprocess_value or (lambda module, client, api_version, options, values: values)
        self.update_value = update_value
        # Unless overridden, an option is settable/updatable iff the
        # corresponding callback was supplied.
        self.can_set_value = can_set_value or (lambda api_version: set_value is not None)
        self.can_update_value = can_update_value or (lambda api_version: update_value is not None)

    @classmethod
    def config_value(
        cls,
        config_name,
        postprocess_for_get=None,
        preprocess_for_set=None,
        get_expected_value=None,
        ignore_mismatching_result=None,
        min_api_version=None,
        preprocess_value=None,
        update_parameter=None,
    ):
        """Build an engine for a single option stored under ``Config[config_name]``.

        If ``update_parameter`` is given, the option can also be changed on a
        running container via the update endpoint.
        """
        def preprocess_value_(module, client, api_version, options, values):
            if len(options) != 1:
                raise AssertionError('config_value can only be used for a single option')
            if preprocess_value is not None and options[0].name in values:
                value = preprocess_value(module, client, api_version, values[options[0].name])
                # A preprocessor returning None drops the option entirely.
                if value is None:
                    del values[options[0].name]
                else:
                    values[options[0].name] = value
            return values

        def get_value(module, container, api_version, options):
            if len(options) != 1:
                raise AssertionError('config_value can only be used for a single option')
            value = container['Config'].get(config_name, _SENTRY)
            if postprocess_for_get:
                value = postprocess_for_get(module, api_version, value, _SENTRY)
            # The sentry marks "not present"; report no value in that case.
            if value is _SENTRY:
                return {}
            return {options[0].name: value}

        get_expected_values_ = None
        if get_expected_value:
            def get_expected_values_(module, client, api_version, options, image, values):
                if len(options) != 1:
                    raise AssertionError('host_config_value can only be used for a single option')
                value = values.get(options[0].name, _SENTRY)
                value = get_expected_value(module, client, api_version, image, value, _SENTRY)
                if value is _SENTRY:
                    return values
                return {options[0].name: value}

        def set_value(module, data, api_version, options, values):
            if len(options) != 1:
                raise AssertionError('config_value can only be used for a single option')
            if options[0].name not in values:
                return
            value = values[options[0].name]
            if preprocess_for_set:
                value = preprocess_for_set(module, api_version, value)
            data[config_name] = value

        update_value = None
        if update_parameter:
            def update_value(module, data, api_version, options, values):
                if len(options) != 1:
                    raise AssertionError('update_parameter can only be used for a single option')
                if options[0].name not in values:
                    return
                value = values[options[0].name]
                if preprocess_for_set:
                    value = preprocess_for_set(module, api_version, value)
                data[update_parameter] = value

        return cls(
            get_value=get_value,
            preprocess_value=preprocess_value_,
            get_expected_values=get_expected_values_,
            ignore_mismatching_result=ignore_mismatching_result,
            set_value=set_value,
            min_api_version=min_api_version,
            update_value=update_value,
        )

    @classmethod
    def host_config_value(
        cls,
        host_config_name,
        postprocess_for_get=None,
        preprocess_for_set=None,
        get_expected_value=None,
        ignore_mismatching_result=None,
        min_api_version=None,
        preprocess_value=None,
        update_parameter=None,
    ):
        """Build an engine for a single option stored under ``HostConfig[host_config_name]``.

        Mirrors :meth:`config_value` but reads/writes the nested HostConfig
        object of the create payload instead of the top-level Config.
        """
        def preprocess_value_(module, client, api_version, options, values):
            if len(options) != 1:
                raise AssertionError('host_config_value can only be used for a single option')
            if preprocess_value is not None and options[0].name in values:
                value = preprocess_value(module, client, api_version, values[options[0].name])
                # A preprocessor returning None drops the option entirely.
                if value is None:
                    del values[options[0].name]
                else:
                    values[options[0].name] = value
            return values

        def get_value(module, container, api_version, options):
            if len(options) != 1:
                raise AssertionError('host_config_value can only be used for a single option')
            value = container['HostConfig'].get(host_config_name, _SENTRY)
            if postprocess_for_get:
                value = postprocess_for_get(module, api_version, value, _SENTRY)
            # The sentry marks "not present"; report no value in that case.
            if value is _SENTRY:
                return {}
            return {options[0].name: value}

        get_expected_values_ = None
        if get_expected_value:
            def get_expected_values_(module, client, api_version, options, image, values):
                if len(options) != 1:
                    raise AssertionError('host_config_value can only be used for a single option')
                value = values.get(options[0].name, _SENTRY)
                value = get_expected_value(module, client, api_version, image, value, _SENTRY)
                if value is _SENTRY:
                    return values
                return {options[0].name: value}

        def set_value(module, data, api_version, options, values):
            if len(options) != 1:
                raise AssertionError('host_config_value can only be used for a single option')
            if options[0].name not in values:
                return
            if 'HostConfig' not in data:
                data['HostConfig'] = {}
            value = values[options[0].name]
            if preprocess_for_set:
                value = preprocess_for_set(module, api_version, value)
            data['HostConfig'][host_config_name] = value

        update_value = None
        if update_parameter:
            def update_value(module, data, api_version, options, values):
                if len(options) != 1:
                    raise AssertionError('update_parameter can only be used for a single option')
                if options[0].name not in values:
                    return
                value = values[options[0].name]
                if preprocess_for_set:
                    value = preprocess_for_set(module, api_version, value)
                data[update_parameter] = value

        return cls(
            get_value=get_value,
            preprocess_value=preprocess_value_,
            get_expected_values=get_expected_values_,
            ignore_mismatching_result=ignore_mismatching_result,
            set_value=set_value,
            min_api_version=min_api_version,
            update_value=update_value,
        )
+
+
+def _is_volume_permissions(mode):
+ for part in mode.split(','):
+ if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'):
+ return False
+ return True
+
+
+def _normalize_port(port):
+ if '/' not in port:
+ return port + '/tcp'
+ return port
+
+
def _get_default_host_ip(module, client):
    # Determine the default IP for published ports: an explicit
    # 'default_host_ip' module parameter wins; otherwise the first attached
    # bridge network that sets com.docker.network.bridge.host_binding_ipv4
    # decides, falling back to 0.0.0.0.
    if module.params['default_host_ip'] is not None:
        return module.params['default_host_ip']
    ip = '0.0.0.0'
    for network_data in module.params['networks'] or []:
        if network_data.get('name'):
            network = client.get_network(network_data['name'])
            if network is None:
                client.fail(
                    "Cannot inspect the network '{0}' to determine the default IP".format(network_data['name']),
                )
            if network.get('Driver') == 'bridge' and network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
                ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
                break
    return ip
+
+
+def _get_value_detach_interactive(module, container, api_version, options):
+ attach_stdin = container['Config'].get('OpenStdin')
+ attach_stderr = container['Config'].get('AttachStderr')
+ attach_stdout = container['Config'].get('AttachStdout')
+ return {
+ 'interactive': bool(attach_stdin),
+ 'detach': not (attach_stderr and attach_stdout),
+ }
+
+
+def _set_value_detach_interactive(module, data, api_version, options, values):
+ interactive = values.get('interactive')
+ detach = values.get('detach')
+
+ data['AttachStdout'] = False
+ data['AttachStderr'] = False
+ data['AttachStdin'] = False
+ data['StdinOnce'] = False
+ data['OpenStdin'] = interactive
+ if not detach:
+ data['AttachStdout'] = True
+ data['AttachStderr'] = True
+ if interactive:
+ data['AttachStdin'] = True
+ data['StdinOnce'] = True
+
+
+def _get_expected_env_value(module, client, api_version, image, value, sentry):
+ expected_env = {}
+ if image and image['Config'].get('Env'):
+ for env_var in image['Config']['Env']:
+ parts = env_var.split('=', 1)
+ expected_env[parts[0]] = parts[1]
+ if value and value is not sentry:
+ for env_var in value:
+ parts = env_var.split('=', 1)
+ expected_env[parts[0]] = parts[1]
+ param_env = []
+ for key, env_value in expected_env.items():
+ param_env.append("%s=%s" % (key, env_value))
+ return param_env
+
+
+def _preprocess_cpus(module, client, api_version, value):
+ if value is not None:
+ value = int(round(value * 1E9))
+ return value
+
+
+def _preprocess_devices(module, client, api_version, value):
+ if not value:
+ return value
+ expected_devices = []
+ for device in value:
+ parts = device.split(':')
+ if len(parts) == 1:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[0],
+ PathOnHost=parts[0]
+ ))
+ elif len(parts) == 2:
+ parts = device.split(':')
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ )
+ )
+ else:
+ expected_devices.append(
+ dict(
+ CgroupPermissions=parts[2],
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ ))
+ return expected_devices
+
+
def _preprocess_rate_bps(module, client, api_version, value):
    """Convert device throttle entries into the API's Path/Rate form,
    parsing human-readable rates (e.g. '10M') into bytes."""
    if not value:
        return value
    return [
        {'Path': entry['path'], 'Rate': human_to_bytes(entry['rate'])}
        for entry in value
    ]
+
+
+def _preprocess_rate_iops(module, client, api_version, value):
+ if not value:
+ return value
+ devices = []
+ for device in value:
+ devices.append({
+ 'Path': device['path'],
+ 'Rate': device['rate'],
+ })
+ return devices
+
+
+def _preprocess_device_requests(module, client, api_version, value):
+ if not value:
+ return value
+ device_requests = []
+ for dr in value:
+ device_requests.append({
+ 'Driver': dr['driver'],
+ 'Count': dr['count'],
+ 'DeviceIDs': dr['device_ids'],
+ 'Capabilities': dr['capabilities'],
+ 'Options': dr['options'],
+ })
+ return device_requests
+
+
+def _preprocess_etc_hosts(module, client, api_version, value):
+ if value is None:
+ return value
+ results = []
+ for key, value in value.items():
+ results.append('%s%s%s' % (key, ':', value))
+ return results
+
+
def _preprocess_healthcheck(module, client, api_version, value):
    # Convert the module's healthcheck option into the API's Healthcheck
    # object. An empty or test-less healthcheck explicitly disables checks
    # via the special test ['NONE']; None-valued fields are omitted.
    if value is None:
        return value
    if not value or not value.get('test'):
        value = {'test': ['NONE']}
    elif 'test' in value:
        value['test'] = normalize_healthcheck_test(value['test'])
    return omit_none_from_dict({
        'Test': value.get('test'),
        'Interval': value.get('interval'),
        'Timeout': value.get('timeout'),
        'StartPeriod': value.get('start_period'),
        'Retries': value.get('retries'),
    })
+
+
+def _postprocess_healthcheck_get_value(module, api_version, value, sentry):
+ if value is None or value is sentry or value.get('Test') == ['NONE']:
+ return {'Test': ['NONE']}
+ return value
+
+
def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None):
    # Convert a human-readable size (e.g. '1G') stored in values[name] into
    # bytes, in place. If 'unlimited_value' is given, the string 'unlimited'
    # (or the stringified unlimited value) maps to that sentinel instead.
    # Fails the module on unparsable input.
    if name not in values:
        return values
    try:
        value = values[name]
        if unlimited_value is not None and value in ('unlimited', str(unlimited_value)):
            value = unlimited_value
        else:
            value = human_to_bytes(value)
        values[name] = value
        return values
    except ValueError as exc:
        module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc)))
+
+
+def _get_image_labels(image):
+ if not image:
+ return {}
+
+ # Can't use get('Labels', {}) because 'Labels' may be present and be None
+ return image['Config'].get('Labels') or {}
+
+
def _get_expected_labels_value(module, client, api_version, image, value, sentry):
    """Compute the label set the container should end up with.

    With image_label_mismatch=ignore, labels inherited from the image count
    as expected; user-supplied labels always take precedence.
    """
    if value is sentry:
        return sentry
    expected = {}
    if module.params['image_label_mismatch'] == 'ignore':
        expected.update(dict(_get_image_labels(image)))
    expected.update(value)
    return expected
+
+
+def _preprocess_links(module, client, api_version, value):
+ if value is None:
+ return None
+
+ result = []
+ for link in value:
+ parsed_link = link.split(':', 1)
+ if len(parsed_link) == 2:
+ link, alias = parsed_link
+ else:
+ link, alias = parsed_link[0], parsed_link[0]
+ result.append('/%s:/%s/%s' % (link, module.params['name'], alias))
+
+ return result
+
+
def _ignore_mismatching_label_result(module, client, api_version, option, image, container_value, expected_value):
    # Never ignores the mismatch (always returns False), but with strict
    # comparison and image_label_mismatch=fail it fails the module early
    # when satisfying the requested labels would require removing labels
    # that come from the base image.
    if option.comparison == 'strict' and module.params['image_label_mismatch'] == 'fail':
        # If there are labels from the base image that should be removed and
        # base_image_mismatch is fail we want raise an error.
        image_labels = _get_image_labels(image)
        would_remove_labels = []
        labels_param = module.params['labels'] or {}
        for label in image_labels:
            if label not in labels_param:
                # Format label for error message
                would_remove_labels.append('"%s"' % (label, ))
        if would_remove_labels:
            msg = ("Some labels should be removed but are present in the base image. You can set image_label_mismatch to 'ignore' to ignore"
                   " this error. Labels: {0}")
            client.fail(msg.format(', '.join(would_remove_labels)))
    return False
+
+
+def _ignore_mismatching_network_result(module, client, api_version, option, image, container_value, expected_value):
+ # 'networks' is handled out-of-band
+ if option.name == 'networks':
+ return True
+ return False
+
+
def _preprocess_network_values(module, client, api_version, options, values):
    # Resolve requested network names to IDs (failing early for unknown
    # networks) and canonicalize a 'container:NAME' network mode to
    # 'container:ID'.
    if 'networks' in values:
        for network in values['networks']:
            network['id'] = _get_network_id(module, client, network['name'])
            if not network['id']:
                client.fail("Parameter error: network named %s could not be found. Does it exist?" % (network['name'], ))

    if 'network_mode' in values:
        values['network_mode'] = _preprocess_container_names(module, client, api_version, values['network_mode'])

    return values
+
+
+def _get_network_id(module, client, network_name):
+ try:
+ network_id = None
+ params = {'filters': json.dumps({'name': [network_name]})}
+ for network in client.get_json('/networks', params=params):
+ if network['Name'] == network_name:
+ network_id = network['Id']
+ break
+ return network_id
+ except Exception as exc:
+ client.fail("Error getting network id for %s - %s" % (network_name, to_native(exc)))
+
+
def _get_values_network(module, container, api_version, options):
    """Read the container's network mode from HostConfig (empty result when
    the daemon reports none)."""
    mode = container['HostConfig'].get('NetworkMode', _SENTRY)
    return {} if mode is _SENTRY else {'network_mode': mode}
+
+
+def _set_values_network(module, data, api_version, options, values):
+ if 'network_mode' not in values:
+ return
+ if 'HostConfig' not in data:
+ data['HostConfig'] = {}
+ value = values['network_mode']
+ data['HostConfig']['NetworkMode'] = value
+
+
def _get_values_mounts(module, container, api_version, options):
    # Read volumes, binds and mounts from an inspected container, converting
    # HostConfig.Mounts entries back into the module's option vocabulary.
    volumes = container['Config'].get('Volumes')
    binds = container['HostConfig'].get('Binds')
    # According to https://github.com/moby/moby/, support for HostConfig.Mounts
    # has been included at least since v17.03.0-ce, which has API version 1.26.
    # The previous tag, v1.9.1, has API version 1.21 and does not have
    # HostConfig.Mounts. I have no idea what about API 1.25...
    mounts = container['HostConfig'].get('Mounts')
    if mounts is not None:
        result = []
        empty_dict = {}
        for mount in mounts:
            result.append({
                'type': mount.get('Type'),
                'source': mount.get('Source'),
                'target': mount.get('Target'),
                'read_only': mount.get('ReadOnly', False),  # golang's omitempty for bool returns None for False
                'consistency': mount.get('Consistency'),
                'propagation': mount.get('BindOptions', empty_dict).get('Propagation'),
                'no_copy': mount.get('VolumeOptions', empty_dict).get('NoCopy', False),
                'labels': mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict),
                'volume_driver': mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name'),
                'volume_options': mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict),
                'tmpfs_size': mount.get('TmpfsOptions', empty_dict).get('SizeBytes'),
                'tmpfs_mode': mount.get('TmpfsOptions', empty_dict).get('Mode'),
            })
        mounts = result
    # Only report keys the daemon actually returned.
    result = {}
    if volumes is not None:
        result['volumes'] = volumes
    if binds is not None:
        result['volume_binds'] = binds
    if mounts is not None:
        result['mounts'] = mounts
    return result
+
+
+def _get_bind_from_dict(volume_dict):
+ results = []
+ if volume_dict:
+ for host_path, config in volume_dict.items():
+ if isinstance(config, dict) and config.get('bind'):
+ container_path = config.get('bind')
+ mode = config.get('mode', 'rw')
+ results.append("%s:%s:%s" % (host_path, container_path, mode))
+ return results
+
+
def _get_image_binds(volumes):
    '''
    Convert array of binds to array of strings with format host_path:container_path:mode

    :param volumes: array of bind dicts
    :return: array of strings
    '''
    binds = []
    if isinstance(volumes, dict):
        binds.extend(_get_bind_from_dict(volumes))
    elif isinstance(volumes, list):
        for entry in volumes:
            binds.extend(_get_bind_from_dict(entry))
    return binds
+
+
def _get_expected_values_mounts(module, client, api_version, options, image, values):
    # Compute the mounts/volumes/binds a container should end up with, taking
    # image-declared volumes into account.
    expected_values = {}

    # mounts are passed through unchanged
    if 'mounts' in values:
        expected_values['mounts'] = values['mounts']

    # volumes
    expected_vols = dict()
    if image and image['Config'].get('Volumes'):
        expected_vols.update(image['Config'].get('Volumes'))
    if 'volumes' in values:
        for vol in values['volumes']:
            # We only expect anonymous volumes to show up in the list
            if ':' in vol:
                parts = vol.split(':')
                if len(parts) == 3:
                    continue
                if len(parts) == 2:
                    if not _is_volume_permissions(parts[1]):
                        continue
            expected_vols[vol] = {}
    if expected_vols:
        expected_values['volumes'] = expected_vols

    # binds
    image_vols = []
    if image:
        image_vols = _get_image_binds(image['Config'].get('Volumes'))
    param_vols = []
    if 'volume_binds' in values:
        param_vols = values['volume_binds']
    # set() deduplicates binds that come from both image and parameters
    expected_values['volume_binds'] = list(set(image_vols + param_vols))

    return expected_values
+
+
def _set_values_mounts(module, data, api_version, options, values):
    # Write mounts, anonymous volumes and binds into the create payload,
    # translating the module's mount vocabulary into the API's Mounts
    # objects (Bind/Volume/Tmpfs options are type-specific).
    if 'mounts' in values:
        if 'HostConfig' not in data:
            data['HostConfig'] = {}
        mounts = []
        for mount in values['mounts']:
            mount_type = mount.get('type')
            mount_res = {
                'Target': mount.get('target'),
                'Source': mount.get('source'),
                'Type': mount_type,
                'ReadOnly': mount.get('read_only'),
            }
            if 'consistency' in mount:
                mount_res['Consistency'] = mount['consistency']
            if mount_type == 'bind':
                if 'propagation' in mount:
                    mount_res['BindOptions'] = {
                        'Propagation': mount['propagation'],
                    }
            if mount_type == 'volume':
                volume_opts = {}
                if mount.get('no_copy'):
                    volume_opts['NoCopy'] = True
                if mount.get('labels'):
                    volume_opts['Labels'] = mount.get('labels')
                if mount.get('volume_driver'):
                    driver_config = {
                        'Name': mount.get('volume_driver'),
                    }
                    if mount.get('volume_options'):
                        driver_config['Options'] = mount.get('volume_options')
                    volume_opts['DriverConfig'] = driver_config
                if volume_opts:
                    mount_res['VolumeOptions'] = volume_opts
            if mount_type == 'tmpfs':
                tmpfs_opts = {}
                if mount.get('tmpfs_mode'):
                    tmpfs_opts['Mode'] = mount.get('tmpfs_mode')
                if mount.get('tmpfs_size'):
                    tmpfs_opts['SizeBytes'] = mount.get('tmpfs_size')
                if tmpfs_opts:
                    mount_res['TmpfsOptions'] = tmpfs_opts
            mounts.append(mount_res)
        data['HostConfig']['Mounts'] = mounts
    if 'volumes' in values:
        volumes = {}
        for volume in values['volumes']:
            # Only pass anonymous volumes to create container
            if ':' in volume:
                parts = volume.split(':')
                if len(parts) == 3:
                    continue
                if len(parts) == 2:
                    if not _is_volume_permissions(parts[1]):
                        continue
            volumes[volume] = {}
        data['Volumes'] = volumes
    if 'volume_binds' in values:
        if 'HostConfig' not in data:
            data['HostConfig'] = {}
        data['HostConfig']['Binds'] = values['volume_binds']
+
+
+def _get_values_log(module, container, api_version, options):
+ log_config = container['HostConfig'].get('LogConfig') or {}
+ return {
+ 'log_driver': log_config.get('Type'),
+ 'log_options': log_config.get('Config'),
+ }
+
+
+def _set_values_log(module, data, api_version, options, values):
+ if 'log_driver' not in values:
+ return
+ log_config = {
+ 'Type': values['log_driver'],
+ 'Config': values.get('log_options') or {},
+ }
+ if 'HostConfig' not in data:
+ data['HostConfig'] = {}
+ data['HostConfig']['LogConfig'] = log_config
+
+
+def _get_values_platform(module, container, api_version, options):
+ return {
+ 'platform': container.get('Platform'),
+ }
+
+
+def _set_values_platform(module, data, api_version, options, values):
+ if 'platform' in values:
+ data['platform'] = values['platform']
+
+
+def _get_values_restart(module, container, api_version, options):
+ restart_policy = container['HostConfig'].get('RestartPolicy') or {}
+ return {
+ 'restart_policy': restart_policy.get('Name'),
+ 'restart_retries': restart_policy.get('MaximumRetryCount'),
+ }
+
+
+def _set_values_restart(module, data, api_version, options, values):
+ if 'restart_policy' not in values:
+ return
+ restart_policy = {
+ 'Name': values['restart_policy'],
+ 'MaximumRetryCount': values.get('restart_retries'),
+ }
+ if 'HostConfig' not in data:
+ data['HostConfig'] = {}
+ data['HostConfig']['RestartPolicy'] = restart_policy
+
+
+def _update_value_restart(module, data, api_version, options, values):
+ if 'restart_policy' not in values:
+ return
+ data['RestartPolicy'] = {
+ 'Name': values['restart_policy'],
+ 'MaximumRetryCount': values.get('restart_retries'),
+ }
+
+
def _get_values_ports(module, container, api_version, options):
    # Read port bindings, exposed ports (normalized to the 'port/proto'
    # form) and the publish-all flag from an inspected container.
    host_config = container['HostConfig']
    config = container['Config']

    # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
    if config.get('ExposedPorts') is not None:
        expected_exposed = [_normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()]
    else:
        expected_exposed = []

    return {
        'published_ports': host_config.get('PortBindings'),
        'exposed_ports': expected_exposed,
        'publish_all_ports': host_config.get('PublishAllPorts'),
    }
+
+
def _get_expected_values_ports(module, client, api_version, options, image, values):
    # Compute the port bindings / exposed ports a container should end up
    # with. Published port specs come in several shapes:
    #   [port]            -> bind to 0.0.0.0 on that host port
    #   [ip]              -> bind to that IP, daemon picks the port
    #   [(ip, port), ...] -> multiple explicit bindings
    #   (ip, port)        -> a single explicit binding
    expected_values = {}

    if 'published_ports' in values:
        expected_bound_ports = {}
        for container_port, config in values['published_ports'].items():
            if isinstance(container_port, int):
                container_port = "%s/tcp" % container_port
            if len(config) == 1:
                if isinstance(config[0], int):
                    expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
                else:
                    expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
            elif isinstance(config[0], tuple):
                expected_bound_ports[container_port] = []
                for host_ip, host_port in config:
                    expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')})
            else:
                expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}]
        expected_values['published_ports'] = expected_bound_ports

    # Exposed ports are the union of image-declared and requested ports.
    image_ports = []
    if image:
        image_exposed_ports = image['Config'].get('ExposedPorts') or {}
        image_ports = [_normalize_port(p) for p in image_exposed_ports]
    param_ports = []
    if 'ports' in values:
        param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in values['ports']]
    result = list(set(image_ports + param_ports))
    expected_values['exposed_ports'] = result

    if 'publish_all_ports' in values:
        expected_values['publish_all_ports'] = values['publish_all_ports']

    return expected_values
+
+
def _set_values_ports(module, data, api_version, options, values):
    # Write exposed ports, explicit port bindings, and the publish-all flag
    # into the create payload. A 'ports' entry may be a bare port (tcp
    # assumed) or a (port, proto) tuple.
    if 'ports' in values:
        exposed_ports = {}
        for port_definition in values['ports']:
            port = port_definition
            proto = 'tcp'
            if isinstance(port_definition, tuple):
                if len(port_definition) == 2:
                    proto = port_definition[1]
                port = port_definition[0]
            exposed_ports['%s/%s' % (port, proto)] = {}
        data['ExposedPorts'] = exposed_ports
    if 'published_ports' in values:
        if 'HostConfig' not in data:
            data['HostConfig'] = {}
        data['HostConfig']['PortBindings'] = convert_port_bindings(values['published_ports'])
    if 'publish_all_ports' in values and values['publish_all_ports']:
        if 'HostConfig' not in data:
            data['HostConfig'] = {}
        data['HostConfig']['PublishAllPorts'] = values['publish_all_ports']
+
+
def _preprocess_value_ports(module, client, api_version, options, values):
    # Replace the default-IP placeholder in published port specs with the
    # actual default host IP. The (potentially expensive) lookup is only
    # performed when at least one spec uses the placeholder.
    if 'published_ports' not in values:
        return values
    found = False
    for port_spec in values['published_ports'].values():
        if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING:
            found = True
            break
    if not found:
        return values
    default_ip = _get_default_host_ip(module, client)
    for port, port_spec in values['published_ports'].items():
        if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING:
            values['published_ports'][port] = tuple([default_ip] + list(port_spec[1:]))
    return values
+
+
+def _preprocess_container_names(module, client, api_version, value):
+ if value is None or not value.startswith('container:'):
+ return value
+ container_name = value[len('container:'):]
+ # Try to inspect container to see whether this is an ID or a
+ # name (and in the latter case, retrieve its ID)
+ container = client.get_container(container_name)
+ if container is None:
+ # If we can't find the container, issue a warning and continue with
+ # what the user specified.
+ module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
+ return value
+ return 'container:{0}'.format(container['Id'])
+
+
# Wire each container option up to the Docker API engine. Options that map
# 1:1 onto a Config/HostConfig field use the config_value/host_config_value
# factories (optionally with preprocessing or an update endpoint parameter);
# anything needing custom read/write logic passes explicit callbacks.
OPTION_AUTO_REMOVE.add_engine('docker_api', DockerAPIEngine.host_config_value('AutoRemove'))

OPTION_BLKIO_WEIGHT.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioWeight', update_parameter='BlkioWeight'))

OPTION_CAPABILITIES.add_engine('docker_api', DockerAPIEngine.host_config_value('CapAdd'))

OPTION_CAP_DROP.add_engine('docker_api', DockerAPIEngine.host_config_value('CapDrop'))

OPTION_CGROUP_NS_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('CgroupnsMode', min_api_version='1.41'))

OPTION_CGROUP_PARENT.add_engine('docker_api', DockerAPIEngine.host_config_value('CgroupParent'))

OPTION_COMMAND.add_engine('docker_api', DockerAPIEngine.config_value('Cmd'))

OPTION_CPU_PERIOD.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuPeriod', update_parameter='CpuPeriod'))

OPTION_CPU_QUOTA.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuQuota', update_parameter='CpuQuota'))

OPTION_CPUSET_CPUS.add_engine('docker_api', DockerAPIEngine.host_config_value('CpusetCpus', update_parameter='CpusetCpus'))

OPTION_CPUSET_MEMS.add_engine('docker_api', DockerAPIEngine.host_config_value('CpusetMems', update_parameter='CpusetMems'))

OPTION_CPU_SHARES.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuShares', update_parameter='CpuShares'))

OPTION_ENTRYPOINT.add_engine('docker_api', DockerAPIEngine.config_value('Entrypoint'))

OPTION_CPUS.add_engine('docker_api', DockerAPIEngine.host_config_value('NanoCpus', preprocess_value=_preprocess_cpus))

OPTION_DETACH_INTERACTIVE.add_engine('docker_api', DockerAPIEngine(get_value=_get_value_detach_interactive, set_value=_set_value_detach_interactive))

OPTION_DEVICES.add_engine('docker_api', DockerAPIEngine.host_config_value('Devices', preprocess_value=_preprocess_devices))

OPTION_DEVICE_READ_BPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceReadBps', preprocess_value=_preprocess_rate_bps))

OPTION_DEVICE_WRITE_BPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceWriteBps', preprocess_value=_preprocess_rate_bps))

OPTION_DEVICE_READ_IOPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceReadIOps', preprocess_value=_preprocess_rate_iops))

OPTION_DEVICE_WRITE_IOPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceWriteIOps', preprocess_value=_preprocess_rate_iops))

OPTION_DEVICE_REQUESTS.add_engine('docker_api', DockerAPIEngine.host_config_value(
    'DeviceRequests', min_api_version='1.40', preprocess_value=_preprocess_device_requests))

OPTION_DNS_SERVERS.add_engine('docker_api', DockerAPIEngine.host_config_value('Dns'))

OPTION_DNS_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('DnsOptions'))

OPTION_DNS_SEARCH_DOMAINS.add_engine('docker_api', DockerAPIEngine.host_config_value('DnsSearch'))

OPTION_DOMAINNAME.add_engine('docker_api', DockerAPIEngine.config_value('Domainname'))

OPTION_ENVIRONMENT.add_engine('docker_api', DockerAPIEngine.config_value('Env', get_expected_value=_get_expected_env_value))

OPTION_ETC_HOSTS.add_engine('docker_api', DockerAPIEngine.host_config_value('ExtraHosts', preprocess_value=_preprocess_etc_hosts))

OPTION_GROUPS.add_engine('docker_api', DockerAPIEngine.host_config_value('GroupAdd'))

OPTION_HEALTHCHECK.add_engine('docker_api', DockerAPIEngine.config_value(
    'Healthcheck', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value))

OPTION_HOSTNAME.add_engine('docker_api', DockerAPIEngine.config_value('Hostname'))

# Image mismatches are handled elsewhere (recreate logic), hence always ignored here.
OPTION_IMAGE.add_engine('docker_api', DockerAPIEngine.config_value(
    'Image', ignore_mismatching_result=lambda module, client, api_version, option, image, container_value, expected_value: True))

OPTION_INIT.add_engine('docker_api', DockerAPIEngine.host_config_value('Init'))

OPTION_IPC_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('IpcMode', preprocess_value=_preprocess_container_names))

OPTION_KERNEL_MEMORY.add_engine('docker_api', DockerAPIEngine.host_config_value('KernelMemory', update_parameter='KernelMemory'))

OPTION_LABELS.add_engine('docker_api', DockerAPIEngine.config_value(
    'Labels', get_expected_value=_get_expected_labels_value, ignore_mismatching_result=_ignore_mismatching_label_result))

OPTION_LINKS.add_engine('docker_api', DockerAPIEngine.host_config_value('Links', preprocess_value=_preprocess_links))

OPTION_LOG_DRIVER_OPTIONS.add_engine('docker_api', DockerAPIEngine(
    get_value=_get_values_log,
    set_value=_set_values_log,
))

OPTION_MAC_ADDRESS.add_engine('docker_api', DockerAPIEngine.config_value('MacAddress'))

OPTION_MEMORY.add_engine('docker_api', DockerAPIEngine.host_config_value('Memory', update_parameter='Memory'))

OPTION_MEMORY_RESERVATION.add_engine('docker_api', DockerAPIEngine.host_config_value('MemoryReservation', update_parameter='MemoryReservation'))

OPTION_MEMORY_SWAP.add_engine('docker_api', DockerAPIEngine.host_config_value('MemorySwap', update_parameter='MemorySwap'))

OPTION_MEMORY_SWAPPINESS.add_engine('docker_api', DockerAPIEngine.host_config_value('MemorySwappiness'))

OPTION_STOP_TIMEOUT.add_engine('docker_api', DockerAPIEngine.config_value('StopTimeout'))

OPTION_NETWORK.add_engine('docker_api', DockerAPIEngine(
    preprocess_value=_preprocess_network_values,
    get_value=_get_values_network,
    set_value=_set_values_network,
    ignore_mismatching_result=_ignore_mismatching_network_result,
))

OPTION_OOM_KILLER.add_engine('docker_api', DockerAPIEngine.host_config_value('OomKillDisable'))

OPTION_OOM_SCORE_ADJ.add_engine('docker_api', DockerAPIEngine.host_config_value('OomScoreAdj'))

OPTION_PID_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('PidMode', preprocess_value=_preprocess_container_names))

OPTION_PIDS_LIMIT.add_engine('docker_api', DockerAPIEngine.host_config_value('PidsLimit'))
+
+OPTION_PLATFORM.add_engine('docker_api', DockerAPIEngine(
+ get_value=_get_values_platform,
+ set_value=_set_values_platform,
+ min_api_version='1.41',
+))
+
+OPTION_PRIVILEGED.add_engine('docker_api', DockerAPIEngine.host_config_value('Privileged'))
+
+OPTION_READ_ONLY.add_engine('docker_api', DockerAPIEngine.host_config_value('ReadonlyRootfs'))
+
+OPTION_RESTART_POLICY.add_engine('docker_api', DockerAPIEngine(
+ get_value=_get_values_restart,
+ set_value=_set_values_restart,
+ update_value=_update_value_restart,
+))
+
+OPTION_RUNTIME.add_engine('docker_api', DockerAPIEngine.host_config_value('Runtime'))
+
+OPTION_SECURITY_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('SecurityOpt'))
+
+OPTION_SHM_SIZE.add_engine('docker_api', DockerAPIEngine.host_config_value('ShmSize'))
+
+OPTION_STOP_SIGNAL.add_engine('docker_api', DockerAPIEngine.config_value('StopSignal'))
+
+OPTION_STORAGE_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('StorageOpt'))
+
+OPTION_SYSCTLS.add_engine('docker_api', DockerAPIEngine.host_config_value('Sysctls'))
+
+OPTION_TMPFS.add_engine('docker_api', DockerAPIEngine.host_config_value('Tmpfs'))
+
+OPTION_TTY.add_engine('docker_api', DockerAPIEngine.config_value('Tty'))
+
+OPTION_ULIMITS.add_engine('docker_api', DockerAPIEngine.host_config_value('Ulimits'))
+
+OPTION_USER.add_engine('docker_api', DockerAPIEngine.config_value('User'))
+
+OPTION_USERNS_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('UsernsMode'))
+
+OPTION_UTS.add_engine('docker_api', DockerAPIEngine.host_config_value('UTSMode'))
+
+OPTION_VOLUME_DRIVER.add_engine('docker_api', DockerAPIEngine.host_config_value('VolumeDriver'))
+
+OPTION_VOLUMES_FROM.add_engine('docker_api', DockerAPIEngine.host_config_value('VolumesFrom'))
+
+OPTION_WORKING_DIR.add_engine('docker_api', DockerAPIEngine.config_value('WorkingDir'))
+
+OPTION_MOUNTS_VOLUMES.add_engine('docker_api', DockerAPIEngine(
+ get_value=_get_values_mounts,
+ get_expected_values=_get_expected_values_mounts,
+ set_value=_set_values_mounts,
+))
+
+OPTION_PORTS.add_engine('docker_api', DockerAPIEngine(
+ get_value=_get_values_ports,
+ get_expected_values=_get_expected_values_ports,
+ set_value=_set_values_ports,
+ preprocess_value=_preprocess_value_ports,
+))
diff --git a/ansible_collections/community/docker/plugins/module_utils/module_container/module.py b/ansible_collections/community/docker/plugins/module_utils/module_container/module.py
new file mode 100644
index 00000000..230dbfb4
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/module_container/module.py
@@ -0,0 +1,843 @@
+# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import re
+from time import sleep
+
+from ansible.module_utils.common.text.converters import to_native, to_text
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DifferenceTracker,
+ DockerBaseClass,
+ compare_generic,
+ is_image_name_id,
+ sanitize_result,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag
+
+
+class Container(DockerBaseClass):
+ def __init__(self, container, engine_driver):
+ super(Container, self).__init__()
+ self.raw = container
+ self.id = None
+ self.image = None
+ self.image_name = None
+ self.container = container
+ self.engine_driver = engine_driver
+ if container:
+ self.id = engine_driver.get_container_id(container)
+ self.image = engine_driver.get_image_from_container(container)
+ self.image_name = engine_driver.get_image_name_from_container(container)
+ self.log(self.container, pretty_print=True)
+
+ @property
+ def exists(self):
+ return True if self.container else False
+
+ @property
+ def removing(self):
+ return self.engine_driver.is_container_removing(self.container) if self.container else False
+
+ @property
+ def running(self):
+ return self.engine_driver.is_container_running(self.container) if self.container else False
+
+ @property
+ def paused(self):
+ return self.engine_driver.is_container_paused(self.container) if self.container else False
+
+
+class ContainerManager(DockerBaseClass):
+ def __init__(self, module, engine_driver, client, active_options):
+ self.module = module
+ self.engine_driver = engine_driver
+ self.client = client
+ self.options = active_options
+ self.all_options = self._collect_all_options(active_options)
+ self.check_mode = self.module.check_mode
+ self.param_cleanup = self.module.params['cleanup']
+ self.param_container_default_behavior = self.module.params['container_default_behavior']
+ self.param_default_host_ip = self.module.params['default_host_ip']
+ self.param_debug = self.module.params['debug']
+ self.param_force_kill = self.module.params['force_kill']
+ self.param_image = self.module.params['image']
+ self.param_image_comparison = self.module.params['image_comparison']
+ self.param_image_label_mismatch = self.module.params['image_label_mismatch']
+ self.param_image_name_mismatch = self.module.params['image_name_mismatch']
+ self.param_keep_volumes = self.module.params['keep_volumes']
+ self.param_kill_signal = self.module.params['kill_signal']
+ self.param_name = self.module.params['name']
+ self.param_networks_cli_compatible = self.module.params['networks_cli_compatible']
+ self.param_output_logs = self.module.params['output_logs']
+ self.param_paused = self.module.params['paused']
+ self.param_pull = self.module.params['pull']
+ self.param_recreate = self.module.params['recreate']
+ self.param_removal_wait_timeout = self.module.params['removal_wait_timeout']
+ self.param_restart = self.module.params['restart']
+ self.param_state = self.module.params['state']
+ self._parse_comparisons()
+ self._update_params()
+ self.results = {'changed': False, 'actions': []}
+ self.diff = {}
+ self.diff_tracker = DifferenceTracker()
+ self.facts = {}
+ if self.param_default_host_ip:
+ valid_ip = False
+ if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.param_default_host_ip):
+ valid_ip = True
+ if re.match(r'^\[[0-9a-fA-F:]+\]$', self.param_default_host_ip):
+ valid_ip = True
+ if re.match(r'^[0-9a-fA-F:]+$', self.param_default_host_ip):
+ self.param_default_host_ip = '[{0}]'.format(self.param_default_host_ip)
+ valid_ip = True
+ if not valid_ip:
+ self.fail('The value of default_host_ip must be an empty string, an IPv4 address, '
+ 'or an IPv6 address. Got "{0}" instead.'.format(self.param_default_host_ip))
+
+ def _collect_all_options(self, active_options):
+ all_options = {}
+ for options in active_options:
+ for option in options.options:
+ all_options[option.name] = option
+ return all_options
+
+ def _collect_all_module_params(self):
+ all_module_options = set()
+ for option, data in self.module.argument_spec.items():
+ all_module_options.add(option)
+ if 'aliases' in data:
+ for alias in data['aliases']:
+ all_module_options.add(alias)
+ return all_module_options
+
+ def _parse_comparisons(self):
+ # Keep track of all module params and all option aliases
+ all_module_options = self._collect_all_module_params()
+ comp_aliases = {}
+ for option_name, option in self.all_options.items():
+ if option.not_an_ansible_option:
+ continue
+ comp_aliases[option_name] = option_name
+ for alias in option.ansible_aliases:
+ comp_aliases[alias] = option_name
+ # Process legacy ignore options
+ if self.module.params['ignore_image']:
+ self.all_options['image'].comparison = 'ignore'
+ if self.module.params['purge_networks']:
+ self.all_options['networks'].comparison = 'strict'
+ # Process comparsions specified by user
+ if self.module.params.get('comparisons'):
+ # If '*' appears in comparisons, process it first
+ if '*' in self.module.params['comparisons']:
+ value = self.module.params['comparisons']['*']
+ if value not in ('strict', 'ignore'):
+ self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
+ for option in self.all_options.values():
+ if option.name == 'networks':
+ # `networks` is special: only update if
+ # some value is actually specified
+ if self.module.params['networks'] is None:
+ continue
+ option.comparison = value
+ # Now process all other comparisons.
+ comp_aliases_used = {}
+ for key, value in self.module.params['comparisons'].items():
+ if key == '*':
+ continue
+ # Find main key
+ key_main = comp_aliases.get(key)
+ if key_main is None:
+ if key_main in all_module_options:
+ self.fail("The module option '%s' cannot be specified in the comparisons dict, "
+ "since it does not correspond to container's state!" % key)
+ if key not in self.all_options or self.all_options[key].not_an_ansible_option:
+ self.fail("Unknown module option '%s' in comparisons dict!" % key)
+ key_main = key
+ if key_main in comp_aliases_used:
+ self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
+ comp_aliases_used[key_main] = key
+ # Check value and update accordingly
+ if value in ('strict', 'ignore'):
+ self.all_options[key_main].comparison = value
+ elif value == 'allow_more_present':
+ if self.all_options[key_main].comparison_type == 'value':
+ self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
+ self.all_options[key_main].comparison = value
+ else:
+ self.fail("Unknown comparison mode '%s'!" % value)
+ # Copy values
+ for option in self.all_options.values():
+ if option.copy_comparison_from is not None:
+ option.comparison = self.all_options[option.copy_comparison_from].comparison
+ # Check legacy values
+ if self.module.params['ignore_image'] and self.all_options['image'].comparison != 'ignore':
+ self.module.warn('The ignore_image option has been overridden by the comparisons option!')
+ if self.module.params['purge_networks'] and self.all_options['networks'].comparison != 'strict':
+ self.module.warn('The purge_networks option has been overridden by the comparisons option!')
+
+ def _update_params(self):
+ if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None:
+ # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode
+ # (assuming no explicit value is specified for network_mode)
+ self.module.params['network_mode'] = self.module.params['networks'][0]['name']
+ if self.param_container_default_behavior == 'compatibility':
+ old_default_values = dict(
+ auto_remove=False,
+ detach=True,
+ init=False,
+ interactive=False,
+ memory='0',
+ paused=False,
+ privileged=False,
+ read_only=False,
+ tty=False,
+ )
+ for param, value in old_default_values.items():
+ if self.module.params[param] is None:
+ self.module.params[param] = value
+
+ def fail(self, *args, **kwargs):
+ self.client.fail(*args, **kwargs)
+
+ def run(self):
+ if self.param_state in ('stopped', 'started', 'present'):
+ self.present(self.param_state)
+ elif self.param_state == 'absent':
+ self.absent()
+
+ if not self.check_mode and not self.param_debug:
+ self.results.pop('actions')
+
+ if self.module._diff or self.param_debug:
+ self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff
+
+ if self.facts:
+ self.results['container'] = self.facts
+
+ def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None):
+ delay = 1.0
+ total_wait = 0
+ while True:
+ # Inspect container
+ result = self.engine_driver.inspect_container_by_id(self.client, container_id)
+ if result is None:
+ if accept_removal:
+ return
+ msg = 'Encontered vanished container while waiting for container "{0}"'
+ self.fail(msg.format(container_id))
+ # Check container state
+ state = result.get('State', {}).get('Status')
+ if complete_states is not None and state in complete_states:
+ return
+ if wait_states is not None and state not in wait_states:
+ msg = 'Encontered unexpected state "{1}" while waiting for container "{0}"'
+ self.fail(msg.format(container_id, state))
+ # Wait
+ if max_wait is not None:
+ if total_wait > max_wait:
+ msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
+ self.fail(msg.format(container_id, max_wait))
+ if total_wait + delay > max_wait:
+ delay = max_wait - total_wait
+ sleep(delay)
+ total_wait += delay
+ # Exponential backoff, but never wait longer than 10 seconds
+ # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
+ # until the maximal 10 seconds delay is reached. By then, the
+ # code will have slept for ~1.5 minutes.)
+ delay = min(delay * 1.1, 10)
+
+ def _collect_params(self, active_options):
+ parameters = []
+ for options in active_options:
+ values = {}
+ engine = options.get_engine(self.engine_driver.name)
+ for option in options.all_options:
+ if not option.not_an_ansible_option and self.module.params[option.name] is not None:
+ values[option.name] = self.module.params[option.name]
+ values = options.preprocess(self.module, values)
+ engine.preprocess_value(self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, values)
+ parameters.append((options, values))
+ return parameters
+
+ def present(self, state):
+ self.parameters = self._collect_params(self.options)
+ container = self._get_container(self.param_name)
+ was_running = container.running
+ was_paused = container.paused
+ container_created = False
+
+ # If the image parameter was passed then we need to deal with the image
+ # version comparison. Otherwise we handle this depending on whether
+ # the container already runs or not; in the former case, in case the
+ # container needs to be restarted, we use the existing container's
+ # image ID.
+ image, comparison_image = self._get_image(container)
+ self.log(image, pretty_print=True)
+ if not container.exists or container.removing:
+ # New container
+ if container.removing:
+ self.log('Found container in removal phase')
+ else:
+ self.log('No container found')
+ if not self.param_image:
+ self.fail('Cannot create container when image is not specified!')
+ self.diff_tracker.add('exists', parameter=True, active=False)
+ if container.removing and not self.check_mode:
+ # Wait for container to be removed before trying to create it
+ self.wait_for_state(
+ container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout)
+ new_container = self.container_create(self.param_image)
+ if new_container:
+ container = new_container
+ container_created = True
+ else:
+ # Existing container
+ different, differences = self.has_different_configuration(container, comparison_image)
+ image_different = False
+ if self.all_options['image'].comparison == 'strict':
+ image_different = self._image_is_different(image, container)
+ if self.param_image_name_mismatch == 'recreate' and self.param_image is not None and self.param_image != container.image_name:
+ different = True
+ self.diff_tracker.add('image_name', parameter=self.param_image, active=container.image_name)
+ if image_different or different or self.param_recreate:
+ self.diff_tracker.merge(differences)
+ self.diff['differences'] = differences.get_legacy_docker_container_diffs()
+ if image_different:
+ self.diff['image_different'] = True
+ self.log("differences")
+ self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
+ image_to_use = self.param_image
+ if not image_to_use and container and container.image:
+ image_to_use = container.image
+ if not image_to_use:
+ self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
+ if container.running:
+ self.container_stop(container.id)
+ self.container_remove(container.id)
+ if not self.check_mode:
+ self.wait_for_state(
+ container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout)
+ new_container = self.container_create(image_to_use)
+ if new_container:
+ container = new_container
+ container_created = True
+ comparison_image = image
+
+ if container and container.exists:
+ container = self.update_limits(container, comparison_image)
+ container = self.update_networks(container, container_created)
+
+ if state == 'started' and not container.running:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ container = self.container_start(container.id)
+ elif state == 'started' and self.param_restart:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ self.diff_tracker.add('restarted', parameter=True, active=False)
+ container = self.container_restart(container.id)
+ elif state == 'stopped' and container.running:
+ self.diff_tracker.add('running', parameter=False, active=was_running)
+ self.container_stop(container.id)
+ container = self._get_container(container.id)
+
+ if state == 'started' and self.param_paused is not None and container.paused != self.param_paused:
+ self.diff_tracker.add('paused', parameter=self.param_paused, active=was_paused)
+ if not self.check_mode:
+ try:
+ if self.param_paused:
+ self.engine_driver.pause_container(self.client, container.id)
+ else:
+ self.engine_driver.unpause_container(self.client, container.id)
+ except Exception as exc:
+ self.fail("Error %s container %s: %s" % (
+ "pausing" if self.param_paused else "unpausing", container.id, to_native(exc)
+ ))
+ container = self._get_container(container.id)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(set_paused=self.param_paused))
+
+ self.facts = container.raw
+
+ def absent(self):
+ container = self._get_container(self.param_name)
+ if container.exists:
+ if container.running:
+ self.diff_tracker.add('running', parameter=False, active=True)
+ self.container_stop(container.id)
+ self.diff_tracker.add('exists', parameter=False, active=True)
+ self.container_remove(container.id)
+
+ def _output_logs(self, msg):
+ self.module.log(msg=msg)
+
+ def _get_container(self, container):
+ '''
+ Expects container ID or Name. Returns a container object
+ '''
+ container = self.engine_driver.inspect_container_by_name(self.client, container)
+ return Container(container, self.engine_driver)
+
+ def _get_container_image(self, container, fallback=None):
+ if not container.exists or container.removing:
+ return fallback
+ image = container.image
+ if is_image_name_id(image):
+ image = self.engine_driver.inspect_image_by_id(self.client, image)
+ else:
+ repository, tag = parse_repository_tag(image)
+ if not tag:
+ tag = "latest"
+ image = self.engine_driver.inspect_image_by_name(self.client, repository, tag)
+ return image or fallback
+
+ def _get_image(self, container):
+ image_parameter = self.param_image
+ if not image_parameter:
+ self.log('No image specified')
+ return None, self._get_container_image(container)
+ if is_image_name_id(image_parameter):
+ image = self.engine_driver.inspect_image_by_id(self.client, image_parameter)
+ else:
+ repository, tag = parse_repository_tag(image_parameter)
+ if not tag:
+ tag = "latest"
+ image = self.engine_driver.inspect_image_by_name(self.client, repository, tag)
+ if not image or self.param_pull:
+ if not self.check_mode:
+ self.log("Pull the image.")
+ image, alreadyToLatest = self.engine_driver.pull_image(
+ self.client, repository, tag, platform=self.module.params['platform'])
+ if alreadyToLatest:
+ self.results['changed'] = False
+ else:
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+ elif not image:
+ # If the image isn't there, claim we'll pull.
+ # (Implicitly: if the image is there, claim it already was latest.)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+
+ self.log("image")
+ self.log(image, pretty_print=True)
+
+ comparison_image = image
+ if self.param_image_comparison == 'current-image':
+ comparison_image = self._get_container_image(container, image)
+ if comparison_image != image:
+ self.log("current image")
+ self.log(comparison_image, pretty_print=True)
+
+ return image, comparison_image
+
+ def _image_is_different(self, image, container):
+ if image and image.get('Id'):
+ if container and container.image:
+ if image.get('Id') != container.image:
+ self.diff_tracker.add('image', parameter=image.get('Id'), active=container.image)
+ return True
+ return False
+
+ def _compose_create_parameters(self, image):
+ params = {}
+ for options, values in self.parameters:
+ engine = options.get_engine(self.engine_driver.name)
+ if engine.can_set_value(self.engine_driver.get_api_version(self.client)):
+ engine.set_value(self.module, params, self.engine_driver.get_api_version(self.client), options.options, values)
+ params['Image'] = image
+ return params
+
+ def _record_differences(self, differences, options, param_values, engine, container, image):
+ container_values = engine.get_value(self.module, container.raw, self.engine_driver.get_api_version(self.client), options.options)
+ expected_values = engine.get_expected_values(
+ self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, image, param_values.copy())
+ for option in options.options:
+ if option.name in expected_values:
+ param_value = expected_values[option.name]
+ container_value = container_values.get(option.name)
+ match = compare_generic(param_value, container_value, option.comparison, option.comparison_type)
+
+ if not match:
+ # No match.
+ if engine.ignore_mismatching_result(self.module, self.client, self.engine_driver.get_api_version(self.client),
+ option, image, container_value, param_value):
+ # Ignore the result
+ continue
+
+ # Record the differences
+ p = param_value
+ c = container_value
+ if option.comparison_type == 'set':
+ # Since the order does not matter, sort so that the diff output is better.
+ if p is not None:
+ p = sorted(p)
+ if c is not None:
+ c = sorted(c)
+ elif option.comparison_type == 'set(dict)':
+ # Since the order does not matter, sort so that the diff output is better.
+ if option.name == 'expected_mounts':
+ # For selected values, use one entry as key
+ def sort_key_fn(x):
+ return x['target']
+ else:
+ # We sort the list of dictionaries by using the sorted items of a dict as its key.
+ def sort_key_fn(x):
+ return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items())
+ if p is not None:
+ p = sorted(p, key=sort_key_fn)
+ if c is not None:
+ c = sorted(c, key=sort_key_fn)
+ differences.add(option.name, parameter=p, active=c)
+
+ def has_different_configuration(self, container, image):
+ differences = DifferenceTracker()
+ update_differences = DifferenceTracker()
+ for options, param_values in self.parameters:
+ engine = options.get_engine(self.engine_driver.name)
+ if engine.can_update_value(self.engine_driver.get_api_version(self.client)):
+ self._record_differences(update_differences, options, param_values, engine, container, image)
+ else:
+ self._record_differences(differences, options, param_values, engine, container, image)
+ has_differences = not differences.empty
+ # Only consider differences of properties that can be updated when there are also other differences
+ if has_differences:
+ differences.merge(update_differences)
+ return has_differences, differences
+
+ def has_different_resource_limits(self, container, image):
+ differences = DifferenceTracker()
+ for options, param_values in self.parameters:
+ engine = options.get_engine(self.engine_driver.name)
+ if not engine.can_update_value(self.engine_driver.get_api_version(self.client)):
+ continue
+ self._record_differences(differences, options, param_values, engine, container, image)
+ has_differences = not differences.empty
+ return has_differences, differences
+
+ def _compose_update_parameters(self):
+ result = {}
+ for options, values in self.parameters:
+ engine = options.get_engine(self.engine_driver.name)
+ if not engine.can_update_value(self.engine_driver.get_api_version(self.client)):
+ continue
+ engine.update_value(self.module, result, self.engine_driver.get_api_version(self.client), options.options, values)
+ return result
+
+ def update_limits(self, container, image):
+ limits_differ, different_limits = self.has_different_resource_limits(container, image)
+ if limits_differ:
+ self.log("limit differences:")
+ self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
+ self.diff_tracker.merge(different_limits)
+ if limits_differ and not self.check_mode:
+ self.container_update(container.id, self._compose_update_parameters())
+ return self._get_container(container.id)
+ return container
+
+ def has_network_differences(self, container):
+ '''
+ Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
+ '''
+ different = False
+ differences = []
+
+ if not self.module.params['networks']:
+ return different, differences
+
+ if not container.container.get('NetworkSettings'):
+ self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = container.container['NetworkSettings']['Networks']
+ for network in self.module.params['networks']:
+ network_info = connected_networks.get(network['name'])
+ if network_info is None:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=None
+ ))
+ else:
+ diff = False
+ network_info_ipam = network_info.get('IPAMConfig') or {}
+ if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'):
+ diff = True
+ if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'):
+ diff = True
+ if network.get('aliases'):
+ if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
+ diff = True
+ if network.get('links'):
+ expected_links = []
+ for link, alias in network['links']:
+ expected_links.append("%s:%s" % (link, alias))
+ if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
+ diff = True
+ if diff:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=dict(
+ name=network['name'],
+ ipv4_address=network_info_ipam.get('IPv4Address'),
+ ipv6_address=network_info_ipam.get('IPv6Address'),
+ aliases=network_info.get('Aliases'),
+ links=network_info.get('Links')
+ )
+ ))
+ return different, differences
+
+ def has_extra_networks(self, container):
+ '''
+ Check if the container is connected to non-requested networks
+ '''
+ extra_networks = []
+ extra = False
+
+ if not container.container.get('NetworkSettings'):
+ self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = container.container['NetworkSettings'].get('Networks')
+ if connected_networks:
+ for network, network_config in connected_networks.items():
+ keep = False
+ if self.module.params['networks']:
+ for expected_network in self.module.params['networks']:
+ if expected_network['name'] == network:
+ keep = True
+ if not keep:
+ extra = True
+ extra_networks.append(dict(name=network, id=network_config['NetworkID']))
+ return extra, extra_networks
+
+ def update_networks(self, container, container_created):
+ updated_container = container
+ if self.all_options['networks'].comparison != 'ignore' or container_created:
+ has_network_differences, network_differences = self.has_network_differences(container)
+ if has_network_differences:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(network_differences=network_differences))
+ else:
+ self.diff['differences'] = [dict(network_differences=network_differences)]
+ for netdiff in network_differences:
+ self.diff_tracker.add(
+ 'network.{0}'.format(netdiff['parameter']['name']),
+ parameter=netdiff['parameter'],
+ active=netdiff['container']
+ )
+ self.results['changed'] = True
+ updated_container = self._add_networks(container, network_differences)
+
+ purge_networks = self.all_options['networks'].comparison == 'strict' and self.module.params['networks'] is not None
+ if not purge_networks and self.module.params['purge_networks']:
+ purge_networks = True
+ self.module.deprecate(
+ 'The purge_networks option is used while networks is not specified. In this case purge_networks=true cannot'
+ ' be replaced by `networks: strict` in comparisons, which is necessary once purge_networks is removed.'
+ ' Please modify the docker_container invocation by adding `networks: []`',
+ version='4.0.0', collection_name='community.docker')
+ if purge_networks:
+ has_extra_networks, extra_networks = self.has_extra_networks(container)
+ if has_extra_networks:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(purge_networks=extra_networks))
+ else:
+ self.diff['differences'] = [dict(purge_networks=extra_networks)]
+ for extra_network in extra_networks:
+ self.diff_tracker.add(
+ 'network.{0}'.format(extra_network['name']),
+ active=extra_network
+ )
+ self.results['changed'] = True
+ updated_container = self._purge_networks(container, extra_networks)
+ return updated_container
+
+ def _add_networks(self, container, differences):
+ for diff in differences:
+ # remove the container from the network, if connected
+ if diff.get('container'):
+ self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
+ if not self.check_mode:
+ try:
+ self.engine_driver.disconnect_container_from_network(self.client, container.id, diff['parameter']['id'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
+ to_native(exc)))
+ # connect to the network
+ self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=diff['parameter']))
+ if not self.check_mode:
+ params = {key: value for key, value in diff['parameter'].items() if key not in ('id', 'name')}
+ try:
+ self.log("Connecting container to network %s" % diff['parameter']['id'])
+ self.log(params, pretty_print=True)
+ self.engine_driver.connect_container_to_network(self.client, container.id, diff['parameter']['id'], params)
+ except Exception as exc:
+ self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc)))
+ return self._get_container(container.id)
+
+ def _purge_networks(self, container, networks):
+ for network in networks:
+ self.results['actions'].append(dict(removed_from_network=network['name']))
+ if not self.check_mode:
+ try:
+ self.engine_driver.disconnect_container_from_network(self.client, container.id, network['name'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (network['name'],
+ to_native(exc)))
+ return self._get_container(container.id)
+
+ def container_create(self, image):
+ create_parameters = self._compose_create_parameters(image)
+ self.log("create container")
+ self.log("image: %s parameters:" % image)
+ self.log(create_parameters, pretty_print=True)
+ self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
+ self.results['changed'] = True
+ new_container = None
+ if not self.check_mode:
+ try:
+ container_id = self.engine_driver.create_container(self.client, self.param_name, create_parameters)
+ except Exception as exc:
+ self.fail("Error creating container: %s" % to_native(exc))
+ return self._get_container(container_id)
+ return new_container
+
    def container_start(self, container_id):
        """Start the container and, unless detached, wait for it to finish.

        Records the action, starts the container, and — when ``detach`` is
        false — waits for the container to exit, captures its exit status and
        output, optionally removes it (``cleanup``), and marks the task failed
        on a non-zero exit status. Returns the inspected container.
        """
        self.log("start container %s" % (container_id))
        self.results['actions'].append(dict(started=container_id))
        self.results['changed'] = True
        if not self.check_mode:
            try:
                self.engine_driver.start_container(self.client, container_id)
            except Exception as exc:
                self.fail("Error starting container %s: %s" % (container_id, to_native(exc)))

            if self.module.params['detach'] is False:
                # Block until the container exits; expose the exit status both
                # in the regular results and in the failure results.
                status = self.engine_driver.wait_for_container(self.client, container_id)
                self.client.fail_results['status'] = status
                self.results['status'] = status

                if self.module.params['auto_remove']:
                    # The daemon removes the container on exit, so its logs
                    # are no longer retrievable.
                    output = "Cannot retrieve result as auto_remove is enabled"
                    if self.param_output_logs:
                        self.module.warn('Cannot output_logs if auto_remove is enabled!')
                else:
                    output, real_output = self.engine_driver.get_container_output(self.client, container_id)
                    if real_output and self.param_output_logs:
                        self._output_logs(msg=output)

                if self.param_cleanup:
                    self.container_remove(container_id, force=True)
                # Inspect (possibly after removal) and attach the captured
                # output to the raw inspection data.
                insp = self._get_container(container_id)
                if insp.raw:
                    insp.raw['Output'] = output
                else:
                    insp.raw = dict(Output=output)
                if status != 0:
                    # Set `failed` to True and return output as msg
                    self.results['failed'] = True
                    self.results['msg'] = output
                return insp
        return self._get_container(container_id)
+
+ def container_remove(self, container_id, link=False, force=False):
+ volume_state = (not self.param_keep_volumes)
+ self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
+ self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.engine_driver.remove_container(self.client, container_id, remove_volumes=volume_state, link=link, force=force)
+ except Exception as exc:
+ self.client.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+
+ def container_update(self, container_id, update_parameters):
+ if update_parameters:
+ self.log("update container %s" % (container_id))
+ self.log(update_parameters, pretty_print=True)
+ self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.engine_driver.update_container(self.client, container_id, update_parameters)
+ except Exception as exc:
+ self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_kill(self, container_id):
+ self.results['actions'].append(dict(killed=container_id, signal=self.param_kill_signal))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.engine_driver.kill_container(self.client, container_id, kill_signal=self.param_kill_signal)
+ except Exception as exc:
+ self.fail("Error killing container %s: %s" % (container_id, to_native(exc)))
+
+ def container_restart(self, container_id):
+ self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout']))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.engine_driver.restart_container(self.client, container_id, self.module.params['stop_timeout'] or 10)
+ except Exception as exc:
+ self.fail("Error restarting container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_stop(self, container_id):
+ if self.param_force_kill:
+ self.container_kill(container_id)
+ return
+ self.results['actions'].append(dict(stopped=container_id, timeout=self.module.params['stop_timeout']))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.engine_driver.stop_container(self.client, container_id, self.module.params['stop_timeout'])
+ except Exception as exc:
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+
+
def run_module(engine_driver):
    """Shared entry point for the docker_container engine drivers.

    Builds the module argument spec, delegates AnsibleModule/client creation
    to ``engine_driver.setup()``, and runs a ContainerManager inside the
    driver's execution wrapper.
    """
    module, active_options, client = engine_driver.setup(
        argument_spec=dict(
            cleanup=dict(type='bool', default=False),
            comparisons=dict(type='dict'),
            container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
            command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'),
            default_host_ip=dict(type='str'),
            force_kill=dict(type='bool', default=False, aliases=['forcekill']),
            # ignore_image and purge_networks are deprecated and scheduled
            # for removal in community.docker 4.0.0.
            ignore_image=dict(type='bool', default=False, removed_in_version='4.0.0', removed_from_collection='community.docker'),
            image=dict(type='str'),
            image_comparison=dict(type='str', choices=['desired-image', 'current-image'], default='desired-image'),
            image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'),
            image_name_mismatch=dict(type='str', choices=['ignore', 'recreate'], default='ignore'),
            keep_volumes=dict(type='bool', default=True),
            kill_signal=dict(type='str'),
            name=dict(type='str', required=True),
            networks_cli_compatible=dict(type='bool', default=True),
            output_logs=dict(type='bool', default=False),
            paused=dict(type='bool'),
            pull=dict(type='bool', default=False),
            purge_networks=dict(type='bool', default=False, removed_in_version='4.0.0', removed_from_collection='community.docker'),
            recreate=dict(type='bool', default=False),
            removal_wait_timeout=dict(type='float'),
            restart=dict(type='bool', default=False),
            state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']),
        ),
        required_if=[
            ('state', 'present', ['image'])
        ],
    )

    def execute():
        # Run the container state machine and report sanitized results.
        cm = ContainerManager(module, engine_driver, client, active_options)
        cm.run()
        module.exit_json(**sanitize_result(cm.results))

    engine_driver.run(execute, client)
diff --git a/ansible_collections/community/docker/plugins/module_utils/socket_handler.py b/ansible_collections/community/docker/plugins/module_utils/socket_handler.py
new file mode 100644
index 00000000..f4a3ef79
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/socket_handler.py
@@ -0,0 +1,210 @@
+# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import os.path
+import socket as pysocket
+
+from ansible.module_utils.six import PY2
+
+try:
+ from docker.utils import socket as docker_socket
+ import struct
+except Exception:
+ # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.socket_helper import (
+ make_unblocking,
+ shutdown_writing,
+ write_to_socket,
+)
+
+
+PARAMIKO_POLL_TIMEOUT = 0.01 # 10 milliseconds
+
+
class DockerSocketHandlerBase(object):
    """Non-blocking pump for a Docker attach/exec socket.

    Decodes the daemon's multiplexed stream framing (8-byte header per frame:
    one stream-ID byte, three padding bytes, a big-endian 32-bit payload
    length) into per-stream data blocks, and buffers outgoing writes until
    the socket becomes writable.
    """

    def __init__(self, sock, selectors, log=None):
        # ``selectors`` is the selectors *module* (injected so callers can
        # supply a backport); ``log`` is an optional callable taking a string.
        make_unblocking(sock)

        self._selectors = selectors
        if log is not None:
            self._log = log
        else:
            self._log = lambda msg: True
        # Paramiko Channel objects support select() only for reading
        # (paramiko#695); remember this so writes can be polled via
        # send_ready() in select() below.
        self._paramiko_read_workaround = hasattr(sock, 'send_ready') and 'paramiko' in str(type(sock))

        self._sock = sock
        self._block_done_callback = None
        self._block_buffer = []
        self._eof = False
        self._read_buffer = b''
        self._write_buffer = b''
        self._end_of_writing = False

        # Decoding state for the frame currently in flight.
        self._current_stream = None
        self._current_missing = 0
        self._current_buffer = b''

        self._selector = self._selectors.DefaultSelector()
        self._selector.register(self._sock, self._selectors.EVENT_READ)

    def __enter__(self):
        return self

    def __exit__(self, type, value, tb):
        self._selector.close()

    def set_block_done_callback(self, block_done_callback):
        """Install ``block_done_callback(stream_id, data)`` and replay any
        blocks buffered while no callback was set."""
        self._block_done_callback = block_done_callback
        if self._block_done_callback is not None:
            while self._block_buffer:
                # BUGFIX: was ``self._block_buffer.remove(0)`` — list.remove()
                # searches for the *value* 0 (ValueError on a buffer of
                # tuples) and returns None. pop(0) dequeues the oldest block.
                elt = self._block_buffer.pop(0)
                self._block_done_callback(*elt)

    def _add_block(self, stream_id, data):
        # Deliver a completed block, or buffer it until a callback is set.
        if self._block_done_callback is not None:
            self._block_done_callback(stream_id, data)
        else:
            self._block_buffer.append((stream_id, data))

    def _read(self):
        """Drain available bytes from the socket and decode complete frames."""
        if self._eof:
            return
        if hasattr(self._sock, 'recv'):
            try:
                data = self._sock.recv(262144)
            except Exception as e:
                # After calling self._sock.shutdown(), OpenSSL's/urllib3's
                # WrappedSocket seems to eventually raise ZeroReturnError in
                # case of EOF
                if 'OpenSSL.SSL.ZeroReturnError' in str(type(e)):
                    self._eof = True
                    return
                else:
                    raise
        elif not PY2 and isinstance(self._sock, getattr(pysocket, 'SocketIO')):
            data = self._sock.read()
        else:
            # BUGFIX: os.read() requires a byte count as its second argument;
            # it was called with only the file descriptor (TypeError).
            data = os.read(self._sock.fileno(), 262144)
        if data is None:
            # no data available
            return
        self._log('read {0} bytes'.format(len(data)))
        if len(data) == 0:
            # Stream EOF
            self._eof = True
            return
        self._read_buffer += data
        while len(self._read_buffer) > 0:
            if self._current_missing > 0:
                # Fill up the payload of the frame currently being decoded.
                n = min(len(self._read_buffer), self._current_missing)
                self._current_buffer += self._read_buffer[:n]
                self._read_buffer = self._read_buffer[n:]
                self._current_missing -= n
                if self._current_missing == 0:
                    self._add_block(self._current_stream, self._current_buffer)
                    self._current_buffer = b''
            if len(self._read_buffer) < 8:
                # Wait until a complete 8-byte frame header is available.
                break
            self._current_stream, self._current_missing = struct.unpack('>BxxxL', self._read_buffer[:8])
            self._read_buffer = self._read_buffer[8:]
            if self._current_missing < 0:
                # Stream EOF (as reported by docker daemon)
                # NOTE(review): '>L' unpacks an unsigned value, so this branch
                # looks unreachable — confirm intent against upstream.
                self._eof = True
                break

    def _handle_end_of_writing(self):
        # Once the write buffer is fully flushed, propagate the half-close.
        if self._end_of_writing and len(self._write_buffer) == 0:
            self._end_of_writing = False
            self._log('Shutting socket down for writing')
            shutdown_writing(self._sock, self._log)

    def _write(self):
        """Flush as much of the write buffer as the socket accepts, and keep
        the selector's write-interest in sync with remaining buffered data."""
        if len(self._write_buffer) > 0:
            written = write_to_socket(self._sock, self._write_buffer)
            self._write_buffer = self._write_buffer[written:]
            self._log('wrote {0} bytes, {1} are left'.format(written, len(self._write_buffer)))
            if len(self._write_buffer) > 0:
                self._selector.modify(self._sock, self._selectors.EVENT_READ | self._selectors.EVENT_WRITE)
            else:
                self._selector.modify(self._sock, self._selectors.EVENT_READ)
            self._handle_end_of_writing()

    def select(self, timeout=None, _internal_recursion=False):
        """Wait up to ``timeout`` for readiness and service reads/writes.

        Returns True if any read or write activity happened.
        """
        if not _internal_recursion and self._paramiko_read_workaround and len(self._write_buffer) > 0:
            # When the SSH transport is used, Docker SDK for Python internally uses Paramiko, whose
            # Channel object supports select(), but only for reading
            # (https://github.com/paramiko/paramiko/issues/695).
            if self._sock.send_ready():
                self._write()
                return True
            while timeout is None or timeout > PARAMIKO_POLL_TIMEOUT:
                result = self.select(PARAMIKO_POLL_TIMEOUT, _internal_recursion=True)
                if self._sock.send_ready():
                    # NOTE(review): this calls _read() on send_ready(), while
                    # the analogous post-select workaround below calls
                    # _write(); confirm against upstream whether _write() is
                    # intended here.
                    self._read()
                    result += 1
                if result > 0:
                    return True
                if timeout is not None:
                    timeout -= PARAMIKO_POLL_TIMEOUT
        self._log('select... ({0})'.format(timeout))
        events = self._selector.select(timeout)
        for key, event in events:
            if key.fileobj == self._sock:
                self._log(
                    'select event read:{0} write:{1}'.format(
                        event & self._selectors.EVENT_READ != 0,
                        event & self._selectors.EVENT_WRITE != 0))
                if event & self._selectors.EVENT_READ != 0:
                    self._read()
                if event & self._selectors.EVENT_WRITE != 0:
                    self._write()
        result = len(events)
        if self._paramiko_read_workaround and len(self._write_buffer) > 0:
            # Paramiko channels never report write-readiness via select();
            # poll send_ready() directly.
            if self._sock.send_ready():
                self._write()
                result += 1
        return result > 0

    def is_eof(self):
        """True once EOF has been observed on the socket."""
        return self._eof

    def end_of_writing(self):
        """Declare that no further writes will follow; half-close once the
        write buffer is flushed."""
        self._end_of_writing = True
        self._handle_end_of_writing()

    def consume(self):
        """Pump the socket until EOF; return collected (stdout, stderr) bytes."""
        stdout = []
        stderr = []

        def append_block(stream_id, data):
            if stream_id == docker_socket.STDOUT:
                stdout.append(data)
            elif stream_id == docker_socket.STDERR:
                stderr.append(data)
            else:
                raise ValueError('{0} is not a valid stream ID'.format(stream_id))

        self.end_of_writing()

        self.set_block_done_callback(append_block)
        while not self._eof:
            self.select()
        return b''.join(stdout), b''.join(stderr)

    def write(self, str):
        # NOTE: parameter name shadows the builtin ``str``; kept unchanged for
        # interface compatibility.
        self._write_buffer += str
        if len(self._write_buffer) == len(str):
            # The buffer was empty before this call; attempt an immediate flush.
            self._write()
+
+
class DockerSocketHandlerModule(DockerSocketHandlerBase):
    # Convenience subclass that routes log output through an AnsibleModule's
    # debug() method.
    def __init__(self, sock, module, selectors):
        super(DockerSocketHandlerModule, self).__init__(sock, selectors, module.debug)
diff --git a/ansible_collections/community/docker/plugins/module_utils/socket_helper.py b/ansible_collections/community/docker/plugins/module_utils/socket_helper.py
new file mode 100644
index 00000000..a0885f72
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/socket_helper.py
@@ -0,0 +1,62 @@
+# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import fcntl
+import os
+import os.path
+import socket as pysocket
+
+from ansible.module_utils.six import PY2
+
+
def make_file_unblocking(file):
    """Set the O_NONBLOCK flag on the file descriptor underlying ``file``."""
    fd = file.fileno()
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
+
+
def make_file_blocking(file):
    """Clear the O_NONBLOCK flag on the file descriptor underlying ``file``."""
    fd = file.fileno()
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags & ~os.O_NONBLOCK)
+
+
def make_unblocking(sock):
    """Put ``sock`` into non-blocking mode, whatever kind of object it is.

    Tries, in order: a wrapped raw socket (``_sock`` attribute), a native
    ``setblocking()`` method, and finally fcntl on the file descriptor.
    """
    if hasattr(sock, '_sock'):
        sock._sock.setblocking(0)
        return
    if hasattr(sock, 'setblocking'):
        sock.setblocking(0)
        return
    make_file_unblocking(sock)
+
+
def _empty_writer(msg):
    # Default no-op logger used when callers supply no log callback.
    pass


def shutdown_writing(sock, log=_empty_writer):
    """Half-close ``sock`` for writing, adapting to the socket flavor.

    Tries ``shutdown_write()`` (paramiko-style objects — assumption, confirm),
    then ``shutdown(SHUT_WR)`` with a fallback to argument-less ``shutdown()``,
    then the raw socket behind a SocketIO wrapper. Logs when nothing applies.
    """
    if hasattr(sock, 'shutdown_write'):
        sock.shutdown_write()
        return
    if hasattr(sock, 'shutdown'):
        try:
            sock.shutdown(pysocket.SHUT_WR)
        except TypeError as e:
            # probably: "TypeError: shutdown() takes 1 positional argument but 2 were given"
            log('Shutting down for writing not possible; trying shutdown instead: {0}'.format(e))
            sock.shutdown()
        return
    if not PY2 and isinstance(sock, getattr(pysocket, 'SocketIO')):
        sock._sock.shutdown(pysocket.SHUT_WR)
        return
    log('No idea how to signal end of writing')
+
+
def write_to_socket(sock, data):
    """Write ``data`` to ``sock``; return the number of bytes written.

    Handles three socket flavors: urllib3's pyopenssl WrappedSocket, regular
    sockets with ``send()``, and plain file descriptors.
    """
    if hasattr(sock, '_send_until_done'):
        # WrappedSocket (urllib3/contrib/pyopenssl) doesn't have `send`, but
        # only `sendall`, which uses `_send_until_done` under the hood.
        return sock._send_until_done(data)
    if hasattr(sock, 'send'):
        return sock.send(data)
    return os.write(sock.fileno(), data)
diff --git a/ansible_collections/community/docker/plugins/module_utils/swarm.py b/ansible_collections/community/docker/plugins/module_utils/swarm.py
new file mode 100644
index 00000000..0dbc1e72
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/swarm.py
@@ -0,0 +1,281 @@
+# Copyright (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
+# Copyright (c) Thierry Bouvet (@tbouvet)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import json
+from time import sleep
+
+try:
+ from docker.errors import APIError, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils.common import AnsibleDockerClient
+
+
class AnsibleDockerSwarmClient(AnsibleDockerClient):
    """Docker API client extended with Swarm helpers: node/service inspection
    and node-role checks. Methods fail the Ansible task via ``self.fail()`` on
    unrecoverable API errors."""

    def __init__(self, **kwargs):
        super(AnsibleDockerSwarmClient, self).__init__(**kwargs)

    def get_swarm_node_id(self):
        """
        Get the 'NodeID' of the Swarm node or 'None' if host is not in Swarm. It returns the NodeID
        of Docker host the module is executed on
        :return:
            NodeID of host or 'None' if not part of Swarm
        """

        try:
            info = self.info()
        except APIError as exc:
            self.fail("Failed to get node information for %s" % to_native(exc))

        if info:
            # Round-trip through JSON — presumably to normalize the API
            # result into plain dicts/lists; confirm against the SDK types.
            json_str = json.dumps(info, ensure_ascii=False)
            swarm_info = json.loads(json_str)
            if swarm_info['Swarm']['NodeID']:
                return swarm_info['Swarm']['NodeID']
        return None

    def check_if_swarm_node(self, node_id=None):
        """
        Checking if host is part of Docker Swarm. If 'node_id' is not provided it reads the Docker host
        system information looking if specific key in output exists. If 'node_id' is provided then it tries to
        read node information assuming it is run on Swarm manager. The get_node_inspect() method handles exception if
        it is not executed on Swarm manager

        :param node_id: Node identifier
        :return:
            bool: True if node is part of Swarm, False otherwise
        """

        if node_id is None:
            try:
                info = self.info()
            except APIError:
                self.fail("Failed to get host information.")

            if info:
                json_str = json.dumps(info, ensure_ascii=False)
                swarm_info = json.loads(json_str)
                if swarm_info['Swarm']['NodeID']:
                    return True
                if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
                    return True
            return False
        else:
            try:
                node_info = self.get_node_inspect(node_id=node_id)
            except APIError:
                # NOTE(review): bare return yields None (falsy) rather than
                # False on API errors here.
                return

            if node_info['ID'] is not None:
                return True
            return False

    def check_if_swarm_manager(self):
        """
        Checks if node role is set as Manager in Swarm. The node is the docker host on which module action
        is performed. The inspect_swarm() will fail if node is not a manager

        :return: True if node is Swarm Manager, False otherwise
        """

        try:
            self.inspect_swarm()
            return True
        except APIError:
            return False

    def fail_task_if_not_swarm_manager(self):
        """
        If host is not a swarm manager then Ansible task on this host should end with 'failed' state
        """
        if not self.check_if_swarm_manager():
            self.fail("Error running docker swarm module: must run on swarm manager node")

    def check_if_swarm_worker(self):
        """
        Checks if node role is set as Worker in Swarm. The node is the docker host on which module action
        is performed. Will fail if run on host that is not part of Swarm via check_if_swarm_node()

        :return: True if node is Swarm Worker, False otherwise
        """

        if self.check_if_swarm_node() and not self.check_if_swarm_manager():
            return True
        return False

    def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
        """
        Checks if node status on Swarm manager is 'down'. If node_id is provided it query manager about
        node specified in parameter, otherwise it query manager itself. If run on Swarm Worker node or
        host that is not part of Swarm it will fail the playbook

        :param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
        :param node_id: node ID or name, if None then method will try to get node_id of host module run on
        :return:
            True if node is part of swarm but its state is down, False otherwise
        """

        if repeat_check < 1:
            repeat_check = 1

        if node_id is None:
            node_id = self.get_swarm_node_id()

        for retry in range(0, repeat_check):
            if retry > 0:
                # Wait between polls so the manager can observe a state change.
                sleep(5)
            node_info = self.get_node_inspect(node_id=node_id)
            if node_info['Status']['State'] == 'down':
                return True
        return False

    def get_node_inspect(self, node_id=None, skip_missing=False):
        """
        Returns Swarm node info as in 'docker node inspect' command about single node

        :param skip_missing: if True then function will return None instead of failing the task
        :param node_id: node ID or name, if None then method will try to get node_id of host module run on
        :return:
            Single node information structure
        """

        if node_id is None:
            node_id = self.get_swarm_node_id()

        if node_id is None:
            self.fail("Failed to get node information.")

        try:
            node_info = self.inspect_node(node_id=node_id)
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            if exc.status_code == 404:
                if skip_missing:
                    return None
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)

        if 'ManagerStatus' in node_info:
            if node_info['ManagerStatus'].get('Leader'):
                # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
                # Check moby/moby#35437 for details
                count_colons = node_info['ManagerStatus']['Addr'].count(":")
                if count_colons == 1:
                    swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
                else:
                    swarm_leader_ip = node_info['Status']['Addr']
                node_info['Status']['Addr'] = swarm_leader_ip
        return node_info

    def get_all_nodes_inspect(self):
        """
        Returns Swarm node info as in 'docker node inspect' command about all registered nodes

        :return:
            Structure with information about all nodes
        """
        try:
            node_info = self.nodes()
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
            self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
        except Exception as exc:
            self.fail("Error inspecting swarm node: %s" % exc)

        json_str = json.dumps(node_info, ensure_ascii=False)
        node_info = json.loads(json_str)
        return node_info

    def get_all_nodes_list(self, output='short'):
        """
        Returns list of nodes registered in Swarm

        :param output: Defines format of returned data
        :return:
            If 'output' is 'short' then return data is list of nodes hostnames registered in Swarm,
            if 'output' is 'long' then returns data is list of dict containing the attributes as in
            output of command 'docker node ls'
        """
        nodes_list = []

        nodes_inspect = self.get_all_nodes_inspect()
        if nodes_inspect is None:
            return None

        if output == 'short':
            for node in nodes_inspect:
                nodes_list.append(node['Description']['Hostname'])
        elif output == 'long':
            for node in nodes_inspect:
                node_property = {}

                node_property.update({'ID': node['ID']})
                node_property.update({'Hostname': node['Description']['Hostname']})
                node_property.update({'Status': node['Status']['State']})
                node_property.update({'Availability': node['Spec']['Availability']})
                if 'ManagerStatus' in node:
                    if node['ManagerStatus']['Leader'] is True:
                        node_property.update({'Leader': True})
                    node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
                node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})

                nodes_list.append(node_property)
        else:
            # Unknown output format requested.
            return None

        return nodes_list

    def get_node_name_by_id(self, nodeid):
        # Resolve a node ID to its hostname via node inspection.
        return self.get_node_inspect(nodeid)['Description']['Hostname']

    def get_unlock_key(self):
        # The unlock-key API needs Docker SDK for Python >= 2.7.0.
        if self.docker_py_version < LooseVersion('2.7.0'):
            return None
        return super(AnsibleDockerSwarmClient, self).get_unlock_key()

    def get_service_inspect(self, service_id, skip_missing=False):
        """
        Returns Swarm service info as in 'docker service inspect' command about single service

        :param service_id: service ID or name
        :param skip_missing: if True then function will return None instead of failing the task
        :return:
            Single service information structure
        """
        try:
            service_info = self.inspect_service(service_id)
        except NotFound as exc:
            if skip_missing is False:
                self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
            else:
                return None
        except APIError as exc:
            if exc.status_code == 503:
                self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
            self.fail("Error inspecting swarm service: %s" % exc)
        except Exception as exc:
            self.fail("Error inspecting swarm service: %s" % exc)

        json_str = json.dumps(service_info, ensure_ascii=False)
        service_info = json.loads(json_str)
        return service_info
diff --git a/ansible_collections/community/docker/plugins/module_utils/util.py b/ansible_collections/community/docker/plugins/module_utils/util.py
new file mode 100644
index 00000000..9c6b738c
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/util.py
@@ -0,0 +1,394 @@
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import re
+from datetime import timedelta
+
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+
# Defaults for the shared Docker connection options below.
DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
DEFAULT_TLS = False
DEFAULT_TLS_VERIFY = False
DEFAULT_TLS_HOSTNAME = 'localhost'  # deprecated
DEFAULT_TIMEOUT_SECONDS = 60

# Argument spec shared by every module that talks to a Docker daemon.
# Most options fall back to the corresponding DOCKER_* environment variable.
DOCKER_COMMON_ARGS = dict(
    docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
    tls_hostname=dict(type='str', fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
    api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
    timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
    ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
    client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
    client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
    ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
    tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
    use_ssh_client=dict(type='bool', default=False),
    validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
    debug=dict(type='bool', default=False)
)

# Maps each common option (except 'debug') to the 'ansible_docker_<option>'
# variable it may alternatively be provided through.
DOCKER_COMMON_ARGS_VARS = dict([
    [option_name, 'ansible_docker_%s' % option_name]
    for option_name in DOCKER_COMMON_ARGS
    if option_name != 'debug'
])

DOCKER_MUTUALLY_EXCLUSIVE = []

# TLS client authentication requires both the certificate and its key.
DOCKER_REQUIRED_TOGETHER = [
    ['client_cert', 'client_key']
]

DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
# Suffixes used when parsing/formatting human-readable byte sizes.
BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+
+
def is_image_name_id(name):
    """Check whether the given image name is in fact an image ID (hash).

    An image ID is the literal prefix ``sha256:`` followed by exactly
    64 hexadecimal digits.
    """
    return bool(re.match('^sha256:[0-9a-fA-F]{64}$', name))
+
+
def is_valid_tag(tag, allow_empty=False):
    """Check whether the given string is a valid docker tag name.

    An empty/falsy tag is only accepted when ``allow_empty`` is true.
    The accepted grammar is documented under "Extended description" at
    https://docs.docker.com/engine/reference/commandline/tag/
    """
    if not tag:
        return allow_empty
    tag_pattern = '^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$'
    return re.match(tag_pattern, tag) is not None
+
+
def sanitize_result(data):
    """Sanitize data object for return to Ansible.

    When the data object contains types such as docker.types.containers.HostConfig,
    Ansible will fail when these are returned via exit_json or fail_json.
    HostConfig is derived from dict, but its constructor requires additional
    arguments. This function sanitizes data structures by recursively converting
    everything derived from dict to dict and everything derived from list (and tuple)
    to a list.
    """
    if isinstance(data, dict):
        return {key: sanitize_result(value) for key, value in data.items()}
    if isinstance(data, (list, tuple)):
        return [sanitize_result(item) for item in data]
    return data
+
+
class DockerBaseClass(object):
    # Minimal base class for module implementation classes: carries a debug
    # flag and a no-op log() hook.
    def __init__(self):
        self.debug = False

    def log(self, msg, pretty_print=False):
        # Intentionally a no-op in production. The commented-out body below
        # can be re-enabled locally to append messages to ./docker.log while
        # debugging.
        pass
        # if self.debug:
        #     log_file = open('docker.log', 'a')
        #     if pretty_print:
        #         log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
        #         log_file.write(u'\n')
        #     else:
        #         log_file.write(msg + u'\n')
+
+
def update_tls_hostname(result, old_behavior=False, deprecate_function=None, uses_tls=True):
    # Fill in result['tls_hostname'] from result['docker_host'] when the user
    # did not provide one. The extra parameters are not used here; they are
    # presumably kept for signature compatibility with callers — verify before
    # removing.
    if result['tls_hostname'] is None:
        # get default machine name from the url
        parsed_url = urlparse(result['docker_host'])
        # NOTE(review): rsplit(':', 1)[0] strips a trailing ':port'; for IPv6
        # literals without a port this may cut inside the address — confirm.
        result['tls_hostname'] = parsed_url.netloc.rsplit(':', 1)[0]
+
+
def compare_dict_allow_more_present(av, bv):
    '''
    Compare two dictionaries for whether every entry of the first is in the second.
    '''
    # Every (key, value) pair of av must appear identically in bv; bv may
    # contain additional keys.
    return all(key in bv and bv[key] == value for key, value in av.items())
+
+
def compare_generic(a, b, method, datatype):
    '''
    Compare values a and b as described by method and datatype.

    Returns ``True`` if the values compare equal, and ``False`` if not.

    ``a`` is usually the module's parameter, while ``b`` is a property
    of the current object. ``a`` must not be ``None`` (except for
    ``datatype == 'value'``).

    Valid values for ``method`` are:
    - ``ignore`` (always compare as equal);
    - ``strict`` (only compare if really equal)
    - ``allow_more_present`` (allow b to have elements which a does not have).

    Valid values for ``datatype`` are:
    - ``value``: for simple values (strings, numbers, ...);
    - ``list``: for ``list``s or ``tuple``s where order matters;
    - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
      matter;
    - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
      not matter and which contain ``dict``s; ``allow_more_present`` is used
      for the ``dict``s, and these are assumed to be dictionaries of values;
    - ``dict``: for dictionaries of values.
    '''
    if method == 'ignore':
        return True
    # If a or b is None:
    if a is None or b is None:
        # If both are None: equality
        if a == b:
            return True
        # Otherwise, not equal for values, and equal
        # if the other is empty for set/list/dict
        if datatype == 'value':
            return False
        # For allow_more_present, allow a to be None
        if method == 'allow_more_present' and a is None:
            return True
        # Otherwise, the iterable object which is not None must have length 0
        return len(b if a is None else a) == 0
    # Do proper comparison (both objects not None)
    if datatype == 'value':
        return a == b
    elif datatype == 'list':
        if method == 'strict':
            return a == b
        else:
            # 'allow_more_present': a must be an ordered subsequence of b.
            i = 0
            for v in a:
                while i < len(b) and b[i] != v:
                    i += 1
                if i == len(b):
                    return False
                i += 1
            return True
    elif datatype == 'dict':
        if method == 'strict':
            return a == b
        else:
            return compare_dict_allow_more_present(a, b)
    elif datatype == 'set':
        set_a = set(a)
        set_b = set(b)
        if method == 'strict':
            return set_a == set_b
        else:
            # 'allow_more_present': b must be a superset of a.
            return set_b >= set_a
    elif datatype == 'set(dict)':
        # Every dict in a must be matched (allow_more_present-wise) by some
        # dict in b.
        for av in a:
            found = False
            for bv in b:
                if compare_dict_allow_more_present(av, bv):
                    found = True
                    break
            if not found:
                return False
        if method == 'strict':
            # If we would know that both a and b do not contain duplicates,
            # we could simply compare len(a) to len(b) to finish this test.
            # We can assume that b has no duplicates (as it is returned by
            # docker), but we don't know for a.
            for bv in b:
                found = False
                for av in a:
                    if compare_dict_allow_more_present(av, bv):
                        found = True
                        break
                if not found:
                    return False
        return True
+
+
class DifferenceTracker(object):
    """Collect differences between desired (parameter) and current (active)
    values, and render them in the formats used by the docker modules."""

    def __init__(self):
        # Each entry is a dict with keys 'name', 'parameter' (desired value)
        # and 'active' (current value).
        self._diff = []

    def add(self, name, parameter=None, active=None):
        # Record one difference: 'parameter' is the desired value, 'active'
        # the value currently in effect.
        self._diff.append(dict(
            name=name,
            parameter=parameter,
            active=active,
        ))

    def merge(self, other_tracker):
        # Absorb all differences recorded by another DifferenceTracker.
        self._diff.extend(other_tracker._diff)

    @property
    def empty(self):
        # True when no differences have been recorded.
        return len(self._diff) == 0

    def get_before_after(self):
        '''
        Return dicts ``before`` (active values) and ``after`` (parameter
        values), keyed by difference name — suitable for Ansible diff output.
        '''
        before = dict()
        after = dict()
        for item in self._diff:
            before[item['name']] = item['active']
            after[item['name']] = item['parameter']
        return before, after

    def has_difference_for(self, name):
        '''
        Returns a boolean if a difference exists for name
        '''
        return any(diff for diff in self._diff if diff['name'] == name)

    def get_legacy_docker_container_diffs(self):
        '''
        Return differences in the docker_container legacy format:
        a list of one-key dicts mapping the difference name to its
        ``parameter``/``container`` values.
        '''
        result = []
        for entry in self._diff:
            item = dict()
            item[entry['name']] = dict(
                parameter=entry['parameter'],
                container=entry['active'],
            )
            result.append(item)
        return result

    def get_legacy_docker_diffs(self):
        '''
        Return only the names of the recorded differences, in insertion order
        (legacy format used by older docker modules).
        '''
        result = [entry['name'] for entry in self._diff]
        return result
+
+
def clean_dict_booleans_for_docker_api(data, allow_sequences=False):
    '''
    Go doesn't like Python booleans 'True' or 'False', while Ansible is just
    fine with them in YAML. As such, they need to be converted in cases where
    we pass dictionaries to the Docker API (e.g. docker_network's
    driver_options and docker_prune's filters). When `allow_sequences=True`
    YAML sequences (lists, tuples) are converted to [str] instead of str([...])
    which is the expected format of filters which accept lists such as labels.
    '''
    def to_text(value):
        # Booleans become the lowercase strings Go expects; everything else
        # is stringified as-is.
        if value is True:
            return 'true'
        if value is False:
            return 'false'
        return str(value)

    converted = dict()
    if data is not None:
        for key, value in data.items():
            if allow_sequences and is_sequence(value):
                converted[str(key)] = [to_text(item) for item in value]
            else:
                converted[str(key)] = to_text(value)
    return converted
+
+
def convert_duration_to_nanosecond(time_str):
    """
    Return time duration in nanosecond.

    Accepts strings like '1h2m3s', '500ms' or '10us'; every component is
    optional but at least the unit must be present.
    """
    if not isinstance(time_str, str):
        raise ValueError('Missing unit in duration - %s' % time_str)

    duration_re = re.compile(
        r'^(((?P<hours>\d+)h)?'
        r'((?P<minutes>\d+)m(?!s))?'
        r'((?P<seconds>\d+)s)?'
        r'((?P<milliseconds>\d+)ms)?'
        r'((?P<microseconds>\d+)us)?)$'
    )
    match = duration_re.match(time_str)
    if match is None:
        raise ValueError('Invalid time duration - %s' % time_str)

    # Keep only the units that actually appeared and feed them to timedelta.
    delta_kwargs = dict(
        (unit, int(amount))
        for unit, amount in match.groupdict().items()
        if amount
    )
    delta = timedelta(**delta_kwargs)

    # timedelta stores days/seconds/microseconds; fold them into nanoseconds.
    total_microseconds = delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
    return total_microseconds * 10 ** 3
+
+
def normalize_healthcheck_test(test):
    # Lists/tuples are taken as an explicit command (coerced to strings);
    # anything else is treated as a shell command line.
    if isinstance(test, (tuple, list)):
        return [str(item) for item in test]
    return ['CMD-SHELL', str(test)]
+
+
def normalize_healthcheck(healthcheck, normalize_test=False):
    """
    Return dictionary of healthcheck parameters.

    Duration options are converted from strings like '90s' to nanoseconds;
    'retries' is coerced to int; when ``normalize_test`` is true the 'test'
    entry is normalized to the Docker API list form.
    """
    normalized = dict()

    # Durations understood by the Docker healthcheck API.
    duration_options = ('interval', 'timeout', 'start_period')

    # All supported healthcheck parameters.
    for key in ('test', 'interval', 'timeout', 'start_period', 'retries'):
        if key not in healthcheck:
            continue
        value = healthcheck[key]
        if value is None:
            # due to recursive argument_spec, all keys are always present
            # (but have default value None if not specified)
            continue
        if key in duration_options:
            value = convert_duration_to_nanosecond(value)
        if not value:
            continue
        if key == 'retries':
            try:
                value = int(value)
            except ValueError:
                raise ValueError(
                    'Cannot parse number of retries for healthcheck. '
                    'Expected an integer, got "{0}".'.format(value)
                )
        if key == 'test' and normalize_test:
            value = normalize_healthcheck_test(value)
        normalized[key] = value

    return normalized
+
+
def parse_healthcheck(healthcheck):
    """
    Return dictionary of healthcheck parameters and boolean if
    healthcheck defined in image was requested to be disabled.
    """
    # Without a 'test' entry there is nothing to configure at all.
    if not healthcheck or not healthcheck.get('test'):
        return None, None

    normalized = normalize_healthcheck(healthcheck, normalize_test=True)

    if normalized['test'] == ['NONE']:
        # ['NONE'] explicitly disables the healthcheck inherited from the
        # image: return no healthcheck object and disable_healthcheck=True.
        return None, True

    return normalized, False
+
+
def omit_none_from_dict(d):
    """
    Return a copy of the dictionary with all keys with value None omitted.
    """
    return {key: value for key, value in d.items() if value is not None}
diff --git a/ansible_collections/community/docker/plugins/module_utils/version.py b/ansible_collections/community/docker/plugins/module_utils/version.py
new file mode 100644
index 00000000..5184d70e
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/version.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Provide version object to compare version numbers."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+# Once we drop support for ansible-core 2.11, we can remove the try/except.
+
+from ansible.module_utils.six import raise_from
+
+try:
+ from ansible.module_utils.compat.version import LooseVersion, StrictVersion # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ try:
+ from distutils.version import LooseVersion, StrictVersion # noqa: F401, pylint: disable=unused-import
+ except ImportError as exc:
+ msg = 'To use this plugin or module with ansible-core 2.11, you need to use Python < 3.12 with distutils.version present'
+ raise_from(ImportError(msg), exc)
diff --git a/ansible_collections/community/docker/plugins/modules/current_container_facts.py b/ansible_collections/community/docker/plugins/modules/current_container_facts.py
new file mode 100644
index 00000000..f2cde2b5
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/current_container_facts.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2020 Matt Clay <mclay@redhat.com>
+# Copyright (c) 2020 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: current_container_facts
+short_description: Return facts about whether the module runs in a container
+version_added: 1.1.0
+description:
+ - Return facts about whether the module runs in a Docker or podman container.
+ - This module attempts a best-effort detection. There might be special cases where
+ it does not work; if you encounter one, L(please file an issue,
+ https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=bug_report.md).
+author:
+ - Felix Fontein (@felixfontein)
+extends_documentation_fragment:
+ - community.docker.attributes
+ - community.docker.attributes.facts
+ - community.docker.attributes.facts_module
+'''
+
+EXAMPLES = '''
+- name: Get facts on current container
+ community.docker.current_container_facts:
+
+- name: Print information on current container when running in a container
+ ansible.builtin.debug:
+ msg: "Container ID is {{ ansible_module_container_id }}"
+ when: ansible_module_running_in_container
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Ansible facts returned by the module
+ type: dict
+ returned: always
+ contains:
+ ansible_module_running_in_container:
+ description:
+ - Whether the module was able to detect that it runs in a container or not.
+ returned: always
+ type: bool
+ ansible_module_container_id:
+ description:
+ - The detected container ID.
+ - Contains an empty string if no container was detected.
+ returned: always
+ type: str
+ ansible_module_container_type:
+ description:
+ - The detected container environment.
+ - Contains an empty string if no container was detected, or a non-empty string identifying the container environment.
+ - C(docker) indicates that the module ran inside a regular Docker container.
+ - C(azure_pipelines) indicates that the module ran on Azure Pipelines. This seems to no longer be reported.
+ - C(github_actions) indicates that the module ran inside a Docker container on GitHub Actions. It is supported since community.docker 2.4.0.
+ - C(podman) indicates that the module ran inside a regular Podman container. It is supported since community.docker 3.3.0.
+ returned: always
+ type: str
+ choices:
+ - ''
+ - docker
+ - azure_pipelines
+ - github_actions
+ - podman
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
def main():
    """Detect whether this module runs inside a container and return facts
    (container ID and container environment type)."""
    module = AnsibleModule(dict(), supports_check_mode=True)

    cpuset_path = '/proc/self/cpuset'
    mountinfo_path = '/proc/self/mountinfo'

    container_id = ''
    container_type = ''

    contents = None
    if os.path.exists(cpuset_path):
        # File content varies based on the environment:
        #   No Container: /
        #   Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507
        #   Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891
        #   Podman: /../../../../../..
        # While this was true and worked well for a long time, this seems to be no longer accurate
        # with newer Docker / Podman versions and/or with cgroupv2. That's why the /proc/self/mountinfo
        # detection further down is done when this test is inconclusive.
        with open(cpuset_path, 'rb') as f:
            contents = f.read().decode('utf-8')

        cgroup_path, cgroup_name = os.path.split(contents.strip())

        if cgroup_path == '/docker':
            container_id = cgroup_name
            container_type = 'docker'

        if cgroup_path == '/azpl_job':
            container_id = cgroup_name
            container_type = 'azure_pipelines'

        if cgroup_path == '/actions_job':
            container_id = cgroup_name
            container_type = 'github_actions'

    # Fallback: scan mountinfo for a bind-mounted /etc/hostname whose source
    # path embeds a 64-hex-digit container ID.
    if not container_id and os.path.exists(mountinfo_path):
        with open(mountinfo_path, 'rb') as f:
            contents = f.read().decode('utf-8')

        # As to why this works, see the explanations by Matt Clay in
        # https://github.com/ansible/ansible/blob/80d2f8da02052f64396da6b8caaf820eedbf18e2/test/lib/ansible_test/_internal/docker_util.py#L571-L610

        for line in contents.splitlines():
            # mountinfo: field index 4 is the mount point, index 3 the root.
            parts = line.split()
            if len(parts) >= 5 and parts[4] == '/etc/hostname':
                m = re.match('.*/([a-f0-9]{64})/hostname$', parts[3])
                if m:
                    container_id = m.group(1)
                    container_type = 'docker'

                # Podman keeps the hostname under a 'userdata' directory.
                m = re.match('.*/([a-f0-9]{64})/userdata/hostname$', parts[3])
                if m:
                    container_id = m.group(1)
                    container_type = 'podman'

    module.exit_json(ansible_facts=dict(
        ansible_module_running_in_container=container_id != '',
        ansible_module_container_id=container_id,
        ansible_module_container_type=container_type,
    ))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_compose.py b/ansible_collections/community/docker/plugins/modules/docker_compose.py
new file mode 100644
index 00000000..14ea8331
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_compose.py
@@ -0,0 +1,1220 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: docker_compose
+
+short_description: Manage multi-container Docker applications with Docker Compose.
+
+author: "Chris Houseknecht (@chouseknecht)"
+
+description:
+ - Uses Docker Compose to start, shutdown and scale services. B(This module requires docker-compose < 2.0.0.)
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option.
+ - See the examples for more details.
+ - Supports check mode.
+ - This module was called C(docker_service) before Ansible 2.8. The usage did not change.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ project_src:
+ description:
+ - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file.
+ - Mutually exclusive with I(definition).
+ - Required when no I(definition) is provided.
+ type: path
+ project_name:
+ description:
+ - Provide a project name. If not provided, the project name is taken from the basename of I(project_src).
+ - Required when I(definition) is provided.
+ type: str
+ env_file:
+ description:
+ - By default environment files are loaded from a C(.env) file located directly under the I(project_src) directory.
+ - I(env_file) can be used to specify the path of a custom environment file instead.
+ - The path is relative to the I(project_src) directory.
+ - Requires C(docker-compose) version 1.25.0 or greater.
+ - "Note: C(docker-compose) versions C(<=1.28) load the env file from the current working directory of the
+ C(docker-compose) command rather than I(project_src)."
+ type: path
+ version_added: 1.9.0
+ files:
+ description:
+ - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml).
+ - Files are loaded and merged in the order given.
+ type: list
+ elements: path
+ profiles:
+ description:
+ - List of profiles to enable when starting services.
+ - Equivalent to C(docker-compose --profile).
+ - Requires C(docker-compose) version 1.28.0 or greater.
+ type: list
+ elements: str
+ version_added: 1.8.0
+ state:
+ description:
+ - Desired state of the project.
+ - Specifying C(present) is the same as running C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart)
+ (with I(restarted)).
+ - Specifying C(absent) is the same as running C(docker-compose down).
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ services:
+ description:
+ - When I(state) is C(present) run C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart) (with I(restarted))
+ on a subset of services.
+ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)).
+ type: list
+ elements: str
+ scale:
+ description:
+ - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key
+ is the name of the service and the value is an integer count for the number of containers.
+ type: dict
+ dependencies:
+ description:
+ - When I(state) is C(present) specify whether or not to include linked services.
+ type: bool
+ default: true
+ definition:
+ description:
+ - Compose file describing one or more services, networks and volumes.
+ - Mutually exclusive with I(project_src) and I(files).
+ type: dict
+ hostname_check:
+ description:
+ - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
+ type: bool
+ default: false
+ recreate:
+ description:
+ - By default containers will be recreated when their configuration differs from the service definition.
+ - Setting to C(never) ignores configuration differences and leaves existing containers unchanged.
+ - Setting to C(always) forces recreation of all existing containers.
+ type: str
+ default: smart
+ choices:
+ - always
+ - never
+ - smart
+ build:
+ description:
+ - Use with I(state) C(present) to always build images prior to starting the application.
+ - Same as running C(docker-compose build) with the pull option.
+ - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
+ - Use the I(nocache) option to ignore the image cache when performing the build.
+ - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: false
+ pull:
+ description:
+ - Use with I(state) C(present) to always pull images prior to starting the application.
+ - Same as running C(docker-compose pull).
+ - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: false
+ nocache:
+ description:
+ - Use with the I(build) option to ignore the cache during the image build process.
+ type: bool
+ default: false
+ remove_images:
+ description:
+ - Use with I(state) C(absent) to remove all images or only local images.
+ type: str
+ choices:
+ - 'all'
+ - 'local'
+ remove_volumes:
+ description:
+ - Use with I(state) C(absent) to remove data volumes.
+ type: bool
+ default: false
+ stopped:
+ description:
+ - Use with I(state) C(present) to stop all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be stopped.
+ - Requires C(docker-compose) version 1.17.0 or greater for full support. For older versions, the services will
+ first be started and then stopped when the service is supposed to be created as stopped.
+ type: bool
+ default: false
+ restarted:
+ description:
+ - Use with I(state) C(present) to restart all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be restarted.
+ type: bool
+ default: false
+ remove_orphans:
+ description:
+ - Remove containers for services not defined in the Compose file.
+ type: bool
+ default: false
+ timeout:
+ description:
+ - Timeout in seconds for container shutdown when attached or when containers are already running.
+ - By default C(compose) will use a C(10s) timeout unless C(default_grace_period) is defined for a
+ particular service in the I(project_src).
+ type: int
+ default: null
+ use_ssh_client:
+ description:
+ - Currently ignored for this module, but might suddenly be supported later on.
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0"
+ - "docker-compose >= 1.7.0, < 2.0.0"
+ - "Docker API >= 1.25"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
+# flask directory
+
+- name: Run using a project directory
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Tear down existing services
+ community.docker.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Create and start services
+ community.docker.docker_compose:
+ project_src: flask
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - name: Run `docker-compose up` again
+ community.docker.docker_compose:
+ project_src: flask
+ build: false
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that: not output.changed
+
+ - name: Stop all services
+ community.docker.docker_compose:
+ project_src: flask
+ build: false
+ stopped: true
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "not output.services.web.flask_web_1.state.running"
+ - "not output.services.db.flask_db_1.state.running"
+
+ - name: Restart services
+ community.docker.docker_compose:
+ project_src: flask
+ build: false
+ restarted: true
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "output.services.web.flask_web_1.state.running"
+ - "output.services.db.flask_db_1.state.running"
+
+- name: Scale the web service to 2
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - community.docker.docker_compose:
+ project_src: flask
+ scale:
+ web: 2
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+- name: Run with inline Compose file version 2
+ # https://docs.docker.com/compose/compose-file/compose-file-v2/
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - community.docker.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.docker.docker_compose:
+ project_name: flask
+ definition:
+ version: '2'
+ services:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "output.services.web.flask_web_1.state.running"
+ - "output.services.db.flask_db_1.state.running"
+
+- name: Run with inline Compose file version 1
+ # https://docs.docker.com/compose/compose-file/compose-file-v1/
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - community.docker.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.docker.docker_compose:
+ project_name: flask
+ definition:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ links:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "output.services.web.flask_web_1.state.running"
+ - "output.services.db.flask_db_1.state.running"
+'''
+
+RETURN = '''
+services:
+ description:
+ - A dictionary mapping the service's name to a dictionary of containers.
+ returned: success
+ type: complex
+ contains:
+ container_name:
+ description: Name of the container. Format is C(project_service_#).
+ returned: success
+ type: complex
+ contains:
+ cmd:
+ description: One or more commands to be executed in the container.
+ returned: success
+ type: list
+ elements: str
+ example: ["postgres"]
+ image:
+ description: Name of the image from which the container was built.
+ returned: success
+ type: str
+ example: postgres
+ labels:
+ description: Meta data assigned to the container.
+ returned: success
+ type: dict
+ example: {...}
+ networks:
+ description: Contains a dictionary for each network to which the container is a member.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ IPAddress:
+ description: The IP address assigned to the container.
+ returned: success
+ type: str
+ example: 172.17.0.2
+ IPPrefixLen:
+ description: Number of bits used by the subnet.
+ returned: success
+ type: int
+ example: 16
+ aliases:
+ description: Aliases assigned to the container by the network.
+ returned: success
+ type: list
+ elements: str
+ example: ['db']
+ globalIPv6:
+ description: IPv6 address assigned to the container.
+ returned: success
+ type: str
+ example: ''
+ globalIPv6PrefixLen:
+ description: IPv6 subnet length.
+ returned: success
+ type: int
+ example: 0
+ links:
+ description: List of container names to which this container is linked.
+ returned: success
+ type: list
+ elements: str
+ example: null
+ macAddress:
+ description: Mac Address assigned to the virtual NIC.
+ returned: success
+ type: str
+ example: "02:42:ac:11:00:02"
+ state:
+ description: Information regarding the current disposition of the container.
+ returned: success
+ type: dict
+ contains:
+ running:
+ description: Whether or not the container is up with a running process.
+ returned: success
+ type: bool
+ example: true
+ status:
+ description: Description of the running state.
+ returned: success
+ type: str
+ example: running
+
+actions:
+ description: Provides the actions to be taken on each service as determined by compose.
+ returned: when in check mode or I(debug) is C(true)
+ type: complex
+ contains:
+ service_name:
+ description: Name of the service.
+ returned: always
+ type: complex
+ contains:
+ pulled_image:
+ description: Provides image details when a new image is pulled for the service.
+ returned: on image pull
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+ built_image:
+ description: Provides image details when a new image is built for the service.
+ returned: on image build
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+
+ action:
+ description: A descriptive name of the action to be performed on the service's containers.
+ returned: always
+ type: list
+ elements: str
+ contains:
+ id:
+ description: the container's long ID
+ returned: always
+ type: str
+ name:
+ description: the container's name
+ returned: always
+ type: str
+ short_id:
+ description: the container's short ID
+ returned: always
+ type: str
+'''
+
+import os
+import re
+import sys
+import tempfile
+import traceback
+from contextlib import contextmanager
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+try:
+ import yaml
+ HAS_YAML = True
+ HAS_YAML_EXC = None
+except ImportError as dummy:
+ HAS_YAML = False
+ HAS_YAML_EXC = traceback.format_exc()
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+try:
+ from compose import __version__ as compose_version
+ from compose.cli.command import project_from_options
+ from compose.service import NoSuchImageError
+ from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
+ HAS_COMPOSE = True
+ HAS_COMPOSE_EXC = None
+ MINIMUM_COMPOSE_VERSION = '1.7.0'
+except ImportError as dummy:
+ HAS_COMPOSE = False
+ HAS_COMPOSE_EXC = traceback.format_exc()
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+)
+
+
+# Maps the module's TLS/auth parameters to the docker-compose CLI flag
+# names understood by project_from_options() (consumed in _get_auth_options()).
+AUTH_PARAM_MAPPING = {
+    u'docker_host': u'--host',
+    u'tls': u'--tls',
+    u'cacert_path': u'--tlscacert',
+    u'cert_path': u'--tlscert',
+    u'key_path': u'--tlskey',
+    u'tls_verify': u'--tlsverify'
+}
+
+
@contextmanager
def stdout_redirector(path_name):
    """Temporarily redirect sys.stdout into the file at ``path_name``.

    Used to capture output that docker-compose writes directly to stdout.
    The previous stdout is always restored, even if the body raises.
    """
    old_stdout = sys.stdout
    fd = open(path_name, 'w')
    sys.stdout = fd
    try:
        yield
    finally:
        sys.stdout = old_stdout
        # Close the capture file: the original left it open, leaking the
        # descriptor and potentially leaving buffered output unflushed.
        fd.close()
+
+
@contextmanager
def stderr_redirector(path_name):
    """Temporarily redirect sys.stderr into the file at ``path_name``.

    Counterpart to stdout_redirector(); captures docker-compose's direct
    stderr writes. The previous stderr is always restored.
    """
    old_fh = sys.stderr
    fd = open(path_name, 'w')
    sys.stderr = fd
    try:
        yield
    finally:
        sys.stderr = old_fh
        # Close the capture file: the original left it open, leaking the
        # descriptor and potentially leaving buffered output unflushed.
        fd.close()
+
+
def make_redirection_tempfiles():
    """Create two temp files for stdout/stderr capture and return their paths.

    tempfile.mkstemp() also returns an open OS-level file descriptor; the
    original discarded it without closing, leaking one descriptor per file.
    Callers only ever reopen the files by name, so close the fds here.
    """
    out_fd, out_redir_name = tempfile.mkstemp(prefix="ansible")
    os.close(out_fd)
    err_fd, err_redir_name = tempfile.mkstemp(prefix="ansible")
    os.close(err_fd)
    return (out_redir_name, err_redir_name)
+
+
def cleanup_redirection_tempfiles(out_name, err_name):
    """Delete the stdout/stderr capture files created for a compose call."""
    for path in (out_name, err_name):
        os.remove(path)
+
+
def get_redirected_output(path_name):
    """Read captured output from ``path_name``, strip ANSI color codes,
    delete the file, and return the lines as a list of strings.
    """
    output = []
    with open(path_name, 'r') as fd:
        for line in fd:
            # Strip terminal format/color chars. Anchor the pattern to SGR
            # escape sequences (ESC '[' parameters 'm'): the previous greedy
            # pattern r'\x1b\[.+m' also swallowed the real text between two
            # color codes on the same line.
            new_line = re.sub(r'\x1b\[[0-9;]*m', '', line)
            output.append(new_line)
    os.remove(path_name)
    return output
+
+
def attempt_extract_errors(exc_str, stdout, stderr):
    """Pull ERROR:/WARNING: lines out of captured compose output.

    Returns a dict shaped for AnsibleModule.fail_json(**result) with the
    extracted warnings/errors, a best-guess ``msg``, and the raw captures.
    """
    errors = [line.strip() for line in stderr if line.strip().startswith('ERROR:')]
    errors.extend([line.strip() for line in stdout if line.strip().startswith('ERROR:')])

    warnings = [line.strip() for line in stderr if line.strip().startswith('WARNING:')]
    warnings.extend([line.strip() for line in stdout if line.strip().startswith('WARNING:')])

    # assume either the exception body (if present) or the last error was the 'most'
    # fatal.
    if exc_str.strip():
        msg = exc_str.strip()
    elif errors:
        # The original called errors[-1].encode('utf-8'), which produces a
        # bytes object on Python 3 and later renders as "b'...'" once it is
        # %-formatted into the failure message; keep it a native string.
        msg = to_native(errors[-1])
    else:
        msg = 'unknown cause'

    return {
        'warnings': [to_native(w) for w in warnings],
        'errors': [to_native(e) for e in errors],
        'msg': msg,
        'module_stderr': ''.join(stderr),
        'module_stdout': ''.join(stdout)
    }
+
+
def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
    """Build a fail_json-compatible dict from an exception plus the
    stdout/stderr capture files written during a compose operation.

    ``msg_format`` must contain one '%s' placeholder for the extracted cause.
    """
    stderr_lines = [] if err_name is None else get_redirected_output(err_name)
    stdout_lines = get_redirected_output(out_name)

    info = attempt_extract_errors(str(exc), stdout_lines, stderr_lines)
    info['msg'] = msg_format % info['msg']
    return info
+
+
+class ContainerManager(DockerBaseClass):
+
+    def __init__(self, client):
+        """Read module params off the client, validate the docker-compose
+        environment, and build the compose project object."""
+
+        super(ContainerManager, self).__init__()
+
+        self.client = client
+        # Explicit defaults for every attribute that the params loop below
+        # may populate, so the full attribute surface is visible here.
+        self.project_src = None
+        self.files = None
+        self.project_name = None
+        self.state = None
+        self.definition = None
+        self.hostname_check = None
+        self.timeout = None
+        self.remove_images = None
+        self.remove_orphans = None
+        self.remove_volumes = None
+        self.stopped = None
+        self.restarted = None
+        self.recreate = None
+        self.build = None
+        self.dependencies = None
+        self.services = None
+        self.scale = None
+        self.debug = None
+        self.pull = None
+        self.nocache = None
+
+        # Copy every module parameter onto self (overrides the defaults
+        # above; also sets attributes without defaults, e.g. env_file/profiles).
+        for key, value in client.module.params.items():
+            setattr(self, key, value)
+
+        self.check_mode = client.check_mode
+
+        # Fall back to Ansible's global debug flag when the module option is off.
+        if not self.debug:
+            self.debug = client.module._debug
+
+        # Build the docker-compose CLI-style options dict consumed by
+        # project_from_options() below.
+        self.options = dict()
+        self.options.update(self._get_auth_options())
+        self.options[u'--skip-hostname-check'] = (not self.hostname_check)
+
+        if self.project_name:
+            self.options[u'--project-name'] = self.project_name
+
+        if self.env_file:
+            self.options[u'--env-file'] = self.env_file
+
+        if self.files:
+            self.options[u'--file'] = self.files
+
+        if self.profiles:
+            self.options[u'--profile'] = self.profiles
+
+        if not HAS_COMPOSE:
+            self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
+                             to_native(HAS_COMPOSE_EXC))
+
+        if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
+            self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
+                             "Upgrade docker-compose to a min version of %s." %
+                             (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))
+
+        if self.restarted and self.stopped:
+            self.client.fail("Cannot use restarted and stopped at the same time.")
+
+        self.log("options: ")
+        self.log(self.options, pretty_print=True)
+
+        if self.definition:
+            # Inline definition: serialize it to a temporary
+            # docker-compose.yml that exec_module() removes afterwards.
+            if not HAS_YAML:
+                self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % to_native(HAS_YAML_EXC))
+
+            if not self.project_name:
+                self.client.fail("Parameter error - project_name required when providing definition.")
+
+            self.project_src = tempfile.mkdtemp(prefix="ansible")
+            compose_file = os.path.join(self.project_src, "docker-compose.yml")
+            try:
+                self.log('writing: ')
+                self.log(yaml.dump(self.definition, default_flow_style=False))
+                with open(compose_file, 'w') as f:
+                    f.write(yaml.dump(self.definition, default_flow_style=False))
+            except Exception as exc:
+                self.client.fail("Error writing to %s - %s" % (compose_file, to_native(exc)))
+        else:
+            if not self.project_src:
+                self.client.fail("Parameter error - project_src required.")
+
+        try:
+            self.log("project_src: %s" % self.project_src)
+            self.project = project_from_options(self.project_src, self.options)
+        except Exception as exc:
+            self.client.fail("Configuration error - %s" % to_native(exc))
+
+    def exec_module(self):
+        """Dispatch on state, clean up any temporary project files, and
+        return the result dict for exit_json()."""
+        result = dict()
+
+        if self.state == 'present':
+            result = self.cmd_up()
+        elif self.state == 'absent':
+            result = self.cmd_down()
+
+        if self.definition:
+            # The compose file was written into a temp dir by __init__; remove both.
+            compose_file = os.path.join(self.project_src, "docker-compose.yml")
+            self.log("removing %s" % compose_file)
+            os.remove(compose_file)
+            self.log("removing %s" % self.project_src)
+            os.rmdir(self.project_src)
+
+        # 'actions' is only part of the documented contract in check mode or
+        # when debug is enabled; drop it otherwise.
+        if not self.check_mode and not self.debug and result.get('actions'):
+            result.pop('actions')
+
+        return result
+
+ def _get_auth_options(self):
+ options = dict()
+ for key, value in self.client.auth_params.items():
+ if value is not None:
+ option = AUTH_PARAM_MAPPING.get(key)
+ if option:
+ options[option] = value
+ return options
+
+    def cmd_up(self):
+        """Handle state=present: optionally pull/build, converge services,
+        apply stop/restart/scale, and collect per-container facts."""
+
+        start_deps = self.dependencies
+        service_names = self.services
+        detached = True
+        result = dict(changed=False, actions=[], services=dict())
+
+        # docker-compose CLI-shaped flags used to derive the convergence
+        # strategy and build action.
+        up_options = {
+            u'--no-recreate': False,
+            u'--build': False,
+            u'--no-build': False,
+            u'--no-deps': False,
+            u'--force-recreate': False,
+        }
+
+        if self.recreate == 'never':
+            up_options[u'--no-recreate'] = True
+        elif self.recreate == 'always':
+            up_options[u'--force-recreate'] = True
+
+        if self.remove_orphans:
+            up_options[u'--remove-orphans'] = True
+
+        converge = convergence_strategy_from_opts(up_options)
+        self.log("convergence strategy: %s" % converge)
+
+        if self.pull:
+            pull_output = self.cmd_pull()
+            result['changed'] |= pull_output['changed']
+            result['actions'] += pull_output['actions']
+
+        if self.build:
+            build_output = self.cmd_build()
+            result['changed'] |= build_output['changed']
+            result['actions'] += build_output['actions']
+
+        if self.remove_orphans:
+            # Detect containers labeled for this project whose service is no
+            # longer defined; their removal alone counts as a change.
+            containers = self.client.containers(
+                filters={
+                    'label': [
+                        '{0}={1}'.format(LABEL_PROJECT, self.project.name),
+                        '{0}={1}'.format(LABEL_ONE_OFF, "False")
+                    ],
+                }
+            )
+
+            orphans = []
+            for container in containers:
+                service_name = container.get('Labels', {}).get(LABEL_SERVICE)
+                if service_name not in self.project.service_names:
+                    orphans.append(service_name)
+
+            if orphans:
+                result['changed'] = True
+
+        # Compute the per-service convergence plan (also used for check mode
+        # reporting via result['actions']).
+        for service in self.project.services:
+            if not service_names or service.name in service_names:
+                plan = service.convergence_plan(strategy=converge)
+                if plan.action == 'start' and self.stopped:
+                    # In case the only action is starting, and the user requested
+                    # that the service should be stopped, ignore this service.
+                    continue
+                if not self._service_profile_enabled(service):
+                    continue
+                if plan.action != 'noop':
+                    result['changed'] = True
+                    result_action = dict(service=service.name)
+                    result_action[plan.action] = []
+                    for container in plan.containers:
+                        result_action[plan.action].append(dict(
+                            id=container.id,
+                            name=container.name,
+                            short_id=container.short_id,
+                        ))
+                    result['actions'].append(result_action)
+
+        if not self.check_mode and result['changed']:
+            # Capture compose's direct stdout/stderr writes so errors can be
+            # reported through fail_json instead of polluting module output.
+            out_redir_name, err_redir_name = make_redirection_tempfiles()
+            try:
+                with stdout_redirector(out_redir_name):
+                    with stderr_redirector(err_redir_name):
+                        do_build = build_action_from_opts(up_options)
+                        self.log('Setting do_build to %s' % do_build)
+                        up_kwargs = {
+                            'service_names': service_names,
+                            'start_deps': start_deps,
+                            'strategy': converge,
+                            'do_build': do_build,
+                            'detached': detached,
+                            'remove_orphans': self.remove_orphans,
+                            'timeout': self.timeout,
+                        }
+
+                        if LooseVersion(compose_version) >= LooseVersion('1.17.0'):
+                            up_kwargs['start'] = not self.stopped
+                        elif self.stopped:
+                            self.client.module.warn(
+                                "The 'stopped' option requires docker-compose version >= 1.17.0. " +
+                                "This task was run with docker-compose version %s." % compose_version
+                            )
+
+                        self.project.up(**up_kwargs)
+            except Exception as exc:
+                fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+                                               msg_format="Error starting project %s")
+                self.client.fail(**fail_reason)
+            else:
+                cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+        if self.stopped:
+            stop_output = self.cmd_stop(service_names)
+            result['changed'] |= stop_output['changed']
+            result['actions'] += stop_output['actions']
+
+        if self.restarted:
+            restart_output = self.cmd_restart(service_names)
+            result['changed'] |= restart_output['changed']
+            result['actions'] += restart_output['actions']
+
+        if self.scale:
+            scale_output = self.cmd_scale()
+            result['changed'] |= scale_output['changed']
+            result['actions'] += scale_output['actions']
+
+        for service in self.project.services:
+            service_facts = dict()
+            result['services'][service.name] = service_facts
+            for container in service.containers(stopped=True):
+                inspection = container.inspect()
+                # pare down the inspection data to the most useful bits
+                facts = dict(
+                    cmd=[],
+                    labels=dict(),
+                    image=None,
+                    state=dict(
+                        running=None,
+                        status=None
+                    ),
+                    networks=dict()
+                )
+                if inspection['Config'].get('Cmd', None) is not None:
+                    facts['cmd'] = inspection['Config']['Cmd']
+                if inspection['Config'].get('Labels', None) is not None:
+                    facts['labels'] = inspection['Config']['Labels']
+                if inspection['Config'].get('Image', None) is not None:
+                    facts['image'] = inspection['Config']['Image']
+                if inspection['State'].get('Running', None) is not None:
+                    facts['state']['running'] = inspection['State']['Running']
+                if inspection['State'].get('Status', None) is not None:
+                    facts['state']['status'] = inspection['State']['Status']
+
+                if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
+                    networks = inspection['NetworkSettings']['Networks']
+                    for key in networks:
+                        facts['networks'][key] = dict(
+                            aliases=[],
+                            globalIPv6=None,
+                            globalIPv6PrefixLen=0,
+                            IPAddress=None,
+                            IPPrefixLen=0,
+                            links=None,
+                            macAddress=None,
+                        )
+                        if networks[key].get('Aliases', None) is not None:
+                            facts['networks'][key]['aliases'] = networks[key]['Aliases']
+                        if networks[key].get('GlobalIPv6Address', None) is not None:
+                            facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
+                        if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
+                            facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
+                        if networks[key].get('IPAddress', None) is not None:
+                            facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
+                        if networks[key].get('IPPrefixLen', None) is not None:
+                            facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
+                        if networks[key].get('Links', None) is not None:
+                            facts['networks'][key]['links'] = networks[key]['Links']
+                        if networks[key].get('MacAddress', None) is not None:
+                            facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
+
+                service_facts[container.name] = facts
+
+        return result
+
+    def cmd_pull(self):
+        """Pull images for image-based services; report a change when the
+        image ID differs after the pull. No-op in check mode."""
+        result = dict(
+            changed=False,
+            actions=[],
+        )
+
+        if not self.check_mode:
+            for service in self.project.get_services(self.services, include_deps=False):
+                # Services built from a Dockerfile have no 'image' option to pull.
+                if 'image' not in service.options:
+                    continue
+
+                self.log('Pulling image for service %s' % service.name)
+                # store the existing image ID
+                old_image_id = ''
+                try:
+                    image = service.image()
+                    if image and image.get('Id'):
+                        old_image_id = image['Id']
+                except NoSuchImageError:
+                    # No local image yet; an empty old ID forces 'changed' below.
+                    pass
+                except Exception as exc:
+                    self.client.fail("Error: service image lookup failed - %s" % to_native(exc))
+
+                out_redir_name, err_redir_name = make_redirection_tempfiles()
+                # pull the image
+                try:
+                    with stdout_redirector(out_redir_name):
+                        with stderr_redirector(err_redir_name):
+                            service.pull(ignore_pull_failures=False)
+                except Exception as exc:
+                    fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+                                                   msg_format="Error: pull failed with %s")
+                    self.client.fail(**fail_reason)
+                else:
+                    cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+                # store the new image ID
+                new_image_id = ''
+                try:
+                    image = service.image()
+                    if image and image.get('Id'):
+                        new_image_id = image['Id']
+                except NoSuchImageError as exc:
+                    self.client.fail("Error: service image lookup failed after pull - %s" % to_native(exc))
+
+                if new_image_id != old_image_id:
+                    # if a new image was pulled
+                    result['changed'] = True
+                    result['actions'].append(dict(
+                        service=service.name,
+                        pulled_image=dict(
+                            name=service.image_name,
+                            id=new_image_id
+                        )
+                    ))
+        return result
+
+    def cmd_build(self):
+        """Build images for buildable services; report a change when the
+        resulting image ID differs. No-op in check mode."""
+        result = dict(
+            changed=False,
+            actions=[]
+        )
+        if not self.check_mode:
+            for service in self.project.get_services(self.services, include_deps=False):
+                if service.can_be_built():
+                    self.log('Building image for service %s' % service.name)
+                    # store the existing image ID
+                    old_image_id = ''
+                    try:
+                        image = service.image()
+                        if image and image.get('Id'):
+                            old_image_id = image['Id']
+                    except NoSuchImageError:
+                        pass
+                    except Exception as exc:
+                        self.client.fail("Error: service image lookup failed - %s" % to_native(exc))
+
+                    out_redir_name, err_redir_name = make_redirection_tempfiles()
+                    # build the image
+                    try:
+                        with stdout_redirector(out_redir_name):
+                            with stderr_redirector(err_redir_name):
+                                new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
+                    except Exception as exc:
+                        fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+                                                       msg_format="Error: build failed with %s")
+                        self.client.fail(**fail_reason)
+                    else:
+                        cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+                    # NOTE(review): membership test rather than equality —
+                    # presumably to tolerate one ID being a short/prefixed form
+                    # of the other; confirm before changing to '!='.
+                    if new_image_id not in old_image_id:
+                        # if a new image was built
+                        result['changed'] = True
+                        result['actions'].append(dict(
+                            service=service.name,
+                            built_image=dict(
+                                name=service.image_name,
+                                id=new_image_id
+                            )
+                        ))
+        return result
+
+    def _service_profile_enabled(self, service):
+        """Returns `True` if the service has no profiles defined or has a profile which is among
+        the profiles passed to the `docker compose up` command. Otherwise returns `False`.
+        """
+        # Profiles were introduced in docker-compose 1.28.0; on older
+        # versions every service is considered enabled.
+        if LooseVersion(compose_version) < LooseVersion('1.28.0'):
+            return True
+        return service.enabled_for_profiles(self.profiles or [])
+
+    def cmd_down(self):
+        """Handle state=absent: tear down the project's containers (and,
+        depending on options, images/volumes/orphans)."""
+        result = dict(
+            changed=False,
+            actions=[]
+        )
+        # Any existing container (running or stopped) means there is
+        # something to remove.
+        for service in self.project.services:
+            containers = service.containers(stopped=True)
+            if len(containers):
+                result['changed'] = True
+                result['actions'].append(dict(
+                    service=service.name,
+                    deleted=[container.name for container in containers]
+                ))
+        if not self.check_mode and result['changed']:
+            image_type = image_type_from_opt('--rmi', self.remove_images)
+            out_redir_name, err_redir_name = make_redirection_tempfiles()
+            try:
+                with stdout_redirector(out_redir_name):
+                    with stderr_redirector(err_redir_name):
+                        self.project.down(image_type, self.remove_volumes, self.remove_orphans)
+            except Exception as exc:
+                fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+                                               msg_format="Error stopping project - %s")
+                self.client.fail(**fail_reason)
+            else:
+                cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+        return result
+
+    def cmd_stop(self, service_names):
+        """Stop the running containers of the selected services (all
+        services when service_names is empty)."""
+        result = dict(
+            changed=False,
+            actions=[]
+        )
+        for service in self.project.services:
+            if not service_names or service.name in service_names:
+                service_res = dict(
+                    service=service.name,
+                    stop=[]
+                )
+                # stopped=False: only currently running containers need stopping.
+                for container in service.containers(stopped=False):
+                    result['changed'] = True
+                    service_res['stop'].append(dict(
+                        id=container.id,
+                        name=container.name,
+                        short_id=container.short_id
+                    ))
+                result['actions'].append(service_res)
+        if not self.check_mode and result['changed']:
+            out_redir_name, err_redir_name = make_redirection_tempfiles()
+            try:
+                with stdout_redirector(out_redir_name):
+                    with stderr_redirector(err_redir_name):
+                        self.project.stop(service_names=service_names, timeout=self.timeout)
+            except Exception as exc:
+                fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+                                               msg_format="Error stopping project %s")
+                self.client.fail(**fail_reason)
+            else:
+                cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+        return result
+
+    def cmd_restart(self, service_names):
+        """Restart the containers of the selected services (all services
+        when service_names is empty)."""
+        result = dict(
+            changed=False,
+            actions=[]
+        )
+
+        for service in self.project.services:
+            if not service_names or service.name in service_names:
+                service_res = dict(
+                    service=service.name,
+                    restart=[]
+                )
+                # stopped=True: restart also brings up currently stopped containers.
+                for container in service.containers(stopped=True):
+                    result['changed'] = True
+                    service_res['restart'].append(dict(
+                        id=container.id,
+                        name=container.name,
+                        short_id=container.short_id
+                    ))
+                result['actions'].append(service_res)
+
+        if not self.check_mode and result['changed']:
+            out_redir_name, err_redir_name = make_redirection_tempfiles()
+            try:
+                with stdout_redirector(out_redir_name):
+                    with stderr_redirector(err_redir_name):
+                        self.project.restart(service_names=service_names, timeout=self.timeout)
+            except Exception as exc:
+                fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+                                               msg_format="Error restarting project %s")
+                self.client.fail(**fail_reason)
+            else:
+                cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+        return result
+
+    def cmd_scale(self):
+        """Scale each service named in self.scale to its requested number
+        of containers; 'scale' in the action is the delta, not the total."""
+        result = dict(
+            changed=False,
+            actions=[]
+        )
+        for service in self.project.services:
+            if service.name in self.scale:
+                service_res = dict(
+                    service=service.name,
+                    scale=0
+                )
+                containers = service.containers(stopped=True)
+                scale = self.parse_scale(service.name)
+                if len(containers) != scale:
+                    result['changed'] = True
+                    # Delta between requested and current container count.
+                    service_res['scale'] = scale - len(containers)
+                    if not self.check_mode:
+                        out_redir_name, err_redir_name = make_redirection_tempfiles()
+                        try:
+                            with stdout_redirector(out_redir_name):
+                                with stderr_redirector(err_redir_name):
+                                    service.scale(scale)
+                        except Exception as exc:
+                            fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+                                                           msg_format="Error scaling {0} - %s".format(service.name))
+                            self.client.fail(**fail_reason)
+                        else:
+                            cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+                    result['actions'].append(service_res)
+        return result
+
+ def parse_scale(self, service_name):
+ try:
+ return int(self.scale[service_name])
+ except ValueError:
+ self.client.fail("Error scaling %s - expected int, got %s",
+ service_name, to_native(type(self.scale[service_name])))
+
+
+def main():
+    """Module entry point: declare the argument spec, build the Docker
+    client, and run ContainerManager."""
+    argument_spec = dict(
+        project_src=dict(type='path'),
+        project_name=dict(type='str',),
+        env_file=dict(type='path'),
+        files=dict(type='list', elements='path'),
+        profiles=dict(type='list', elements='str'),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        definition=dict(type='dict'),
+        hostname_check=dict(type='bool', default=False),
+        recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
+        build=dict(type='bool', default=False),
+        remove_images=dict(type='str', choices=['all', 'local']),
+        remove_volumes=dict(type='bool', default=False),
+        remove_orphans=dict(type='bool', default=False),
+        stopped=dict(type='bool', default=False),
+        restarted=dict(type='bool', default=False),
+        scale=dict(type='dict'),
+        services=dict(type='list', elements='str'),
+        dependencies=dict(type='bool', default=True),
+        pull=dict(type='bool', default=False),
+        nocache=dict(type='bool', default=False),
+        debug=dict(type='bool', default=False),
+        timeout=dict(type='int')
+    )
+
+    # An inline definition carries its own project sources, so it cannot be
+    # combined with project_src or files.
+    mutually_exclusive = [
+        ('definition', 'project_src'),
+        ('definition', 'files')
+    ]
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        mutually_exclusive=mutually_exclusive,
+        supports_check_mode=True,
+    )
+
+    try:
+        result = ContainerManager(client).exec_module()
+        client.module.exit_json(**result)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail(
+            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+            exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_config.py b/ansible_collections/community/docker/plugins/modules/docker_config.py
new file mode 100644
index 00000000..9f55e0f0
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_config.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_config
+
+short_description: Manage docker configs.
+
+description:
+ - Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
+ - Adds to the metadata of new configs 'ansible_key', an encrypted hash representation of the data, which is then used
+ in future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated
+ unless the I(force) option is set.
+ - Updates to configs are performed by removing the config and creating it again.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_2_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ data:
+ description:
+ - The value of the config.
+ - Mutually exclusive with I(data_src). One of I(data) and I(data_src) is required if I(state=present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: false
+ data_src:
+ description:
+ - The file on the target from which to read the config.
+ - Mutually exclusive with I(data). One of I(data) and I(data_src) is required if I(state=present).
+ type: path
+ version_added: 1.10.0
+ labels:
+ description:
+ - "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
+ - If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing config.
+ - If C(true), an existing config will be replaced, even if it has not been changed.
+ type: bool
+ default: false
+ rolling_versions:
+ description:
+ - If set to C(true), configs are created with an increasing version number appended to their name.
+ - Adds a label containing the version number to the managed configs with the name C(ansible_version).
+ type: bool
+ default: false
+ version_added: 2.2.0
+ versions_to_keep:
+ description:
+ - When using I(rolling_versions), the number of old versions of the config to keep.
+ - Extraneous old configs are deleted after the new one is created.
+ - Set to C(-1) to keep everything or to C(0) or C(1) to keep only the current one.
+ type: int
+ default: 5
+ version_added: 2.2.0
+ name:
+ description:
+ - The name of the config.
+ type: str
+ required: true
+ state:
+ description:
+ - Set to C(present), if the config should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ template_driver:
+ description:
+ - Set to C(golang) to use a Go template in I(data) or a Go template file in I(data_src).
+ type: str
+ choices:
+ - golang
+ version_added: 2.5.0
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
+ - "Docker API >= 1.30"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - John Hu (@ushuz)
+'''
+
+EXAMPLES = '''
+
+- name: Create config foo (from a file on the control machine)
+ community.docker.docker_config:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+ # file directly after reading it prevents this to happen.
+ data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Create config foo (from a file on the target machine)
+ community.docker.docker_config:
+ name: foo
+ data_src: /path/to/config/file
+ state: present
+
+- name: Change the config data
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the config
+ two: '2'
+ state: present
+
+- name: No change
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing config
+ state: present
+
+- name: Update an existing label
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the config
+ one: '1'
+ state: present
+
+- name: Force the (re-)creation of the config
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ force: true
+ state: present
+
+- name: Remove config foo
+ community.docker.docker_config:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+config_id:
+ description:
+ - The ID assigned by Docker to the config object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+config_name:
+ description:
+ - The name of the created config object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'awesome_config'
+ version_added: 2.2.0
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ compare_generic,
+)
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+
+
+class ConfigManager(DockerBaseClass):
+
+    def __init__(self, client, results):
+        """Capture module parameters and resolve the config payload
+        (inline data or data_src file) into bytes."""
+
+        super(ConfigManager, self).__init__()
+
+        self.client = client
+        self.results = results
+        self.check_mode = self.client.check_mode
+
+        parameters = self.client.module.params
+        self.name = parameters.get('name')
+        self.state = parameters.get('state')
+        self.data = parameters.get('data')
+        if self.data is not None:
+            # Normalize the payload to bytes; base64 input supports binary data.
+            if parameters.get('data_is_b64'):
+                self.data = base64.b64decode(self.data)
+            else:
+                self.data = to_bytes(self.data)
+        data_src = parameters.get('data_src')
+        if data_src is not None:
+            # data and data_src are mutually exclusive (enforced in main()),
+            # so reading the file cannot clobber inline data.
+            try:
+                with open(data_src, 'rb') as f:
+                    self.data = f.read()
+            except Exception as exc:
+                self.client.fail('Error while reading {src}: {error}'.format(src=data_src, error=to_native(exc)))
+        self.labels = parameters.get('labels')
+        self.force = parameters.get('force')
+        self.rolling_versions = parameters.get('rolling_versions')
+        self.versions_to_keep = parameters.get('versions_to_keep')
+        self.template_driver = parameters.get('template_driver')
+
+        if self.rolling_versions:
+            self.version = 0
+        self.data_key = None
+        self.configs = []
+
+    def __call__(self):
+        """Entry point: sync the config object to the requested state."""
+        self.get_config()
+        if self.state == 'present':
+            # Hash of the payload, stored in the 'ansible_key' label for
+            # idempotency checks (the data itself cannot be read back).
+            self.data_key = hashlib.sha224(self.data).hexdigest()
+            self.present()
+            self.remove_old_versions()
+        elif self.state == 'absent':
+            self.absent()
+
+ def get_version(self, config):
+ try:
+ return int(config.get('Spec', {}).get('Labels', {}).get('ansible_version', 0))
+ except ValueError:
+ return 0
+
+ def remove_old_versions(self):
+ if not self.rolling_versions or self.versions_to_keep < 0:
+ return
+ if not self.check_mode:
+ while len(self.configs) > max(self.versions_to_keep, 1):
+ self.remove_config(self.configs.pop(0))
+
+    def get_config(self):
+        ''' Find an existing config. '''
+        try:
+            configs = self.client.configs(filters={'name': self.name})
+        except APIError as exc:
+            self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
+
+        if self.rolling_versions:
+            # Rolling mode: match every '<name>_v<N>' variant and keep them
+            # sorted oldest-first so pruning can pop from the front.
+            self.configs = [
+                config
+                for config in configs
+                if config['Spec']['Name'].startswith('{name}_v'.format(name=self.name))
+            ]
+            self.configs.sort(key=self.get_version)
+        else:
+            # The name filter is a substring match; narrow to exact matches.
+            self.configs = [
+                config for config in configs if config['Spec']['Name'] == self.name
+            ]
+
+    def create_config(self):
+        ''' Create a new config '''
+        config_id = None
+        # We can't see the data after creation, so adding a label we can use for idempotency check
+        labels = {
+            'ansible_key': self.data_key
+        }
+        if self.rolling_versions:
+            # Bump the version and bake it into both a label and the name.
+            self.version += 1
+            labels['ansible_version'] = str(self.version)
+            self.name = '{name}_v{version}'.format(name=self.name, version=self.version)
+        if self.labels:
+            labels.update(self.labels)
+
+        try:
+            if not self.check_mode:
+                # only use templating argument when self.template_driver is defined
+                kwargs = {}
+                if self.template_driver:
+                    kwargs['templating'] = {
+                        'name': self.template_driver
+                    }
+                config_id = self.client.create_config(self.name, self.data, labels=labels, **kwargs)
+                self.configs += self.client.configs(filters={'id': config_id})
+        except APIError as exc:
+            self.client.fail("Error creating config: %s" % to_native(exc))
+
+        # Some SDK versions return a dict instead of a bare ID string.
+        if isinstance(config_id, dict):
+            config_id = config_id['ID']
+
+        return config_id
+
+ def remove_config(self, config):
+ try:
+ if not self.check_mode:
+ self.client.remove_config(config['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing config %s: %s" % (config['Spec']['Name'], to_native(exc)))
+
+    def present(self):
+        ''' Handles state == 'present', creating or updating the config '''
+        if self.configs:
+            # Compare against the newest existing config (last after sorting).
+            config = self.configs[-1]
+            self.results['config_id'] = config['ID']
+            self.results['config_name'] = config['Spec']['Name']
+            data_changed = False
+            template_driver_changed = False
+            attrs = config.get('Spec', {})
+            if attrs.get('Labels', {}).get('ansible_key'):
+                if attrs['Labels']['ansible_key'] != self.data_key:
+                    data_changed = True
+            else:
+                if not self.force:
+                    self.client.module.warn("'ansible_key' label not found. Config will not be changed unless the force parameter is set to 'true'")
+            # template_driver has changed if it was set in the previous config
+            # and now it differs, or if it wasn't set but now it is.
+            if attrs.get('Templating', {}).get('Name'):
+                if attrs['Templating']['Name'] != self.template_driver:
+                    template_driver_changed = True
+            elif self.template_driver:
+                template_driver_changed = True
+            labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+            if self.rolling_versions:
+                self.version = self.get_version(config)
+            if data_changed or template_driver_changed or labels_changed or self.force:
+                # if something changed or force, delete and re-create the config
+                # (rolling mode keeps the old version; pruning happens later).
+                if not self.rolling_versions:
+                    self.absent()
+                config_id = self.create_config()
+                self.results['changed'] = True
+                self.results['config_id'] = config_id
+                self.results['config_name'] = self.name
+        else:
+            self.results['changed'] = True
+            self.results['config_id'] = self.create_config()
+            self.results['config_name'] = self.name
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the config '''
+ if self.configs:
+ for config in self.configs:
+ self.remove_config(config)
+ self.results['changed'] = True
+
+
+def main():
+    """Module entry point: declare the argument spec, build the Docker
+    client, and run ConfigManager."""
+    argument_spec = dict(
+        name=dict(type='str', required=True),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        data=dict(type='str'),
+        data_is_b64=dict(type='bool', default=False),
+        data_src=dict(type='path'),
+        labels=dict(type='dict'),
+        force=dict(type='bool', default=False),
+        rolling_versions=dict(type='bool', default=False),
+        versions_to_keep=dict(type='int', default=5),
+        template_driver=dict(type='str', choices=['golang']),
+    )
+
+    # state=present requires exactly one payload source (data or data_src).
+    required_if = [
+        ('state', 'present', ['data', 'data_src'], True),
+    ]
+
+    mutually_exclusive = [
+        ('data', 'data_src'),
+    ]
+
+    option_minimal_versions = dict(
+        template_driver=dict(docker_py_version='5.0.3', docker_api_version='1.37'),
+    )
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=required_if,
+        mutually_exclusive=mutually_exclusive,
+        min_docker_version='2.6.0',
+        min_docker_api_version='1.30',
+        option_minimal_versions=option_minimal_versions,
+    )
+
+    try:
+        results = dict(
+            changed=False,
+        )
+
+        ConfigManager(client, results)()
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail(
+            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+            exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_container.py b/ansible_collections/community/docker/plugins/modules/docker_container.py
new file mode 100644
index 00000000..9d1ed416
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_container.py
@@ -0,0 +1,1288 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container
+
+short_description: manage Docker containers
+
+description:
+ - Manage the life cycle of Docker containers.
+ - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken.
+
+notes:
+ - For most config changes, the container needs to be recreated. This means that the existing container has to be destroyed and
+ a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to
+ prevent this.
+ - If the module needs to recreate the container, it will only use the options provided to the module to create the
+ new container (except I(image)). Therefore, always specify B(all) options relevant to the container.
+ - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - When trying to pull an image, the module assumes this is always changed in check mode.
+ diff_mode:
+ support: full
+
+options:
+ auto_remove:
+ description:
+ - Enable auto-removal of the container on daemon side when the container's process exits.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ blkio_weight:
+ description:
+ - Block IO (relative weight), between 10 and 1000.
+ type: int
+ capabilities:
+ description:
+ - List of capabilities to add to the container.
+ - This is equivalent to C(docker run --cap-add), or the docker-compose option C(cap_add).
+ type: list
+ elements: str
+ cap_drop:
+ description:
+ - List of capabilities to drop from the container.
+ type: list
+ elements: str
+ cgroupns_mode:
+ description:
+ - Specify the cgroup namespace mode for the container.
+ - The Docker CLI calls this simply C(cgroupns).
+ type: str
+ choices:
+ - host
+ - private
+ version_added: 3.0.0
+ cgroup_parent:
+ description:
+ - Specify the parent cgroup for the container.
+ type: str
+ version_added: 1.1.0
+ cleanup:
+ description:
+ - Use with I(detach=false) to remove the container after successful execution.
+ type: bool
+ default: false
+ command:
+ description:
+ - Command to execute when the container starts. A command may be either a string or a list.
+ - Prior to version 2.4, strings were split on commas.
+ - See I(command_handling) for differences in how strings and lists are handled.
+ type: raw
+ comparisons:
+ description:
+ - Allows to specify how properties of existing containers are compared with
+ module options to decide whether the container should be recreated / updated
+ or not.
+ - Only options which correspond to the state of a container as handled by the
+ Docker daemon can be specified, as well as I(networks).
+ - Must be a dictionary specifying for an option one of the keys C(strict), C(ignore)
+ and C(allow_more_present).
+ - If C(strict) is specified, values are tested for equality, and changes always
+ result in updating or restarting. If C(ignore) is specified, changes are ignored.
+ - C(allow_more_present) is allowed only for lists, sets and dicts. If it is
+ specified for lists or sets, the container will only be updated or restarted if
+ the module option contains a value which is not present in the container's
+ options. If the option is specified for a dict, the container will only be updated
+ or restarted if the module option contains a key which is not present in the
+ container's option, or if the value of a key present differs.
+ - The wildcard option C(*) can be used to set one of the default values C(strict)
+ or C(ignore) to I(all) comparisons which are not explicitly set to other values.
+ - See the examples for details.
+ type: dict
+ container_default_behavior:
+ description:
+ - In older versions of this module, various module options used to have default values.
+ This caused problems with containers which use different values for these options.
+ - The default value is now C(no_defaults). To restore the old behavior, set it to
+ C(compatibility), which will ensure that the default values are used when the values
+ are not explicitly specified by the user.
+ - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory),
+ I(paused), I(privileged), I(read_only) and I(tty) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ default: no_defaults
+ command_handling:
+ description:
+ - The default behavior for I(command) (when provided as a list) and I(entrypoint) is to
+ convert them to strings without considering shell quoting rules. (For comparing idempotency,
+ the resulting string is split considering shell quoting rules.)
+ - Also, setting I(command) to an empty list or string, and setting I(entrypoint) to an empty
+ list will be handled as if these options are not specified. This is different from idempotency
+ handling for other container-config related options.
+ - When this is set to C(compatibility), which was the default until community.docker 3.0.0, the
+ current behavior will be kept.
+ - When this is set to C(correct), these options are kept as lists, and an empty value or empty
+ list will be handled correctly for idempotency checks. This has been the default since
+ community.docker 3.0.0.
+ type: str
+ choices:
+ - compatibility
+ - correct
+ version_added: 1.9.0
+ default: correct
+ cpu_period:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) period.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpu_quota:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) quota.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpus:
+ description:
+ - Specify how much of the available CPU resources a container can use.
+ - A value of C(1.5) means that at most one and a half CPU (core) will be used.
+ type: float
+ cpuset_cpus:
+ description:
+ - CPUs in which to allow execution C(1,3) or C(1-3).
+ type: str
+ cpuset_mems:
+ description:
+ - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1).
+ type: str
+ cpu_shares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ default_host_ip:
+ description:
+ - Define the default host IP to use.
+ - Must be an empty string, an IPv4 address, or an IPv6 address.
+ - With Docker 20.10.2 or newer, this should be set to an empty string (C("")) to avoid the
+ port bindings without an explicit IP address to only bind to IPv4.
+ See U(https://github.com/ansible-collections/community.docker/issues/70) for details.
+ - By default, the module will try to auto-detect this value from the C(bridge) network's
+ C(com.docker.network.bridge.host_binding_ipv4) option. If it cannot auto-detect it, it
+ will fall back to C(0.0.0.0).
+ type: str
+ version_added: 1.2.0
+ detach:
+ description:
+ - Enable detached mode to leave the container running in background.
+ - If disabled, the task will reflect the status of the container run (failed if the command failed).
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(true).
+ type: bool
+ devices:
+ description:
+ - List of host device bindings to add to the container.
+ - "Each binding is a mapping expressed in the format C(<path_on_host>:<path_in_container>:<cgroup_permissions>)."
+ type: list
+ elements: str
+ device_read_bps:
+ description:
+ - "List of device path and read rate (bytes per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: true
+ rate:
+ description:
+ - "Device read limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: true
+ device_write_bps:
+ description:
+ - "List of device and write rate (bytes per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: true
+ rate:
+ description:
+ - "Device write limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: true
+ device_read_iops:
+ description:
+ - "List of device and read rate (IO per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: true
+ rate:
+ description:
+ - "Device read limit."
+ - "Must be a positive integer."
+ type: int
+ required: true
+ device_write_iops:
+ description:
+ - "List of device and write rate (IO per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: true
+ rate:
+ description:
+ - "Device write limit."
+ - "Must be a positive integer."
+ type: int
+ required: true
+ device_requests:
+ description:
+ - Allows to request additional resources, such as GPUs.
+ type: list
+ elements: dict
+ suboptions:
+ capabilities:
+ description:
+ - List of lists of strings to request capabilities.
+ - The top-level list entries are combined by OR, and for every list entry,
+ the entries in the list it contains are combined by AND.
+ - The driver tries to satisfy one of the sub-lists.
+ - Available capabilities for the C(nvidia) driver can be found at
+ U(https://github.com/NVIDIA/nvidia-container-runtime).
+ type: list
+ elements: list
+ count:
+ description:
+ - Number of devices to request.
+ - Set to C(-1) to request all available devices.
+ type: int
+ device_ids:
+ description:
+ - List of device IDs.
+ type: list
+ elements: str
+ driver:
+ description:
+ - Which driver to use for this device.
+ type: str
+ options:
+ description:
+ - Driver-specific options.
+ type: dict
+ version_added: 0.1.0
+ dns_opts:
+ description:
+ - List of DNS options.
+ type: list
+ elements: str
+ dns_servers:
+ description:
+ - List of custom DNS servers.
+ type: list
+ elements: str
+ dns_search_domains:
+ description:
+ - List of custom DNS search domains.
+ type: list
+ elements: str
+ domainname:
+ description:
+ - Container domainname.
+ type: str
+ env:
+ description:
+ - Dictionary of key,value pairs.
+ - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example C("true")) in order to avoid data loss.
+ - Please note that if you are passing values in with Jinja2 templates, like C("{{ value }}"), you need to add C(| string) to prevent Ansible from
+ converting strings such as C("true") back to booleans. The correct way is to use C("{{ value | string }}").
+ type: dict
+ env_file:
+ description:
+ - Path to a file, present on the target, containing environment variables I(FOO=BAR).
+ - If variable also present in I(env), then the I(env) value will override.
+ type: path
+ entrypoint:
+ description:
+ - Command that overwrites the default C(ENTRYPOINT) of the image.
+ - See I(command_handling) for differences in how strings and lists are handled.
+ type: list
+ elements: str
+ etc_hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's C(/etc/hosts) file.
+ type: dict
+ exposed_ports:
+ description:
+ - List of additional container ports which informs Docker that the container
+ listens on the specified network ports at runtime.
+ - If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not
+ need to be exposed again.
+ type: list
+ elements: str
+ aliases:
+ - exposed
+ - expose
+ force_kill:
+ description:
+ - Use the kill command when stopping a running container.
+ type: bool
+ default: false
+ aliases:
+ - forcekill
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work."
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+ that looks like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ retries:
+ description:
+ - Consecutive number of failures needed to report unhealthy.
+ - The default used by the Docker daemon is C(3).
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ - The default used by the Docker daemon is C(0s).
+ type: str
+ hostname:
+ description:
+ - The container's hostname.
+ type: str
+ ignore_image:
+ description:
+ - When I(state) is C(present) or C(started), the module compares the configuration of an existing
+ container to requested configuration. The evaluation includes the image version. If the image
+ version in the registry does not match the container, the container will be recreated. You can
+ stop this behavior by setting I(ignore_image) to C(true).
+ - "B(Warning:) This option is ignored if C(image: ignore) or C(*: ignore) is specified in the
+ I(comparisons) option."
+ - "This option is deprecated since community.docker 3.2.0 and will be removed in community.docker 4.0.0.
+ Use C(image: ignore) in I(comparisons) instead of I(ignore_image=true)."
+ type: bool
+ default: false
+ image:
+ description:
+ - Repository path and tag used to create the container. If an image is not found or pull is true, the image
+ will be pulled from the registry. If no tag is included, C(latest) will be used.
+ - Can also be an image ID. If this is the case, the image is assumed to be available locally.
+ The I(pull) option is ignored for this case.
+ type: str
+ image_comparison:
+ description:
+ - Determines which image to use for idempotency checks that depend on image parameters.
+ - The default, C(desired-image), will use the image that is provided to the module via the I(image) parameter.
+ - C(current-image) will use the image that the container is currently using, if the container exists. It
+ falls back to the image that is provided in case the container does not yet exist.
+ - This affects the I(env), I(env_file), I(exposed_ports), I(labels), and I(volumes) options.
+ type: str
+ choices:
+ - desired-image
+ - current-image
+ default: desired-image
+ version_added: 3.0.0
+ image_label_mismatch:
+ description:
+ - How to handle labels inherited from the image that are not set explicitly.
+ - When C(ignore), labels that are present in the image but not specified in I(labels) will be
+ ignored. This is useful to avoid having to specify the image labels in I(labels) while keeping
+ labels I(comparisons) C(strict).
+ - When C(fail), if there are labels present in the image which are not set from I(labels), the
+ module will fail. This prevents introducing unexpected labels from the base image.
+ - "B(Warning:) This option is ignored unless C(labels: strict) or C(*: strict) is specified in
+ the I(comparisons) option."
+ type: str
+ choices:
+ - 'ignore'
+ - 'fail'
+ default: ignore
+ version_added: 2.6.0
+ image_name_mismatch:
+ description:
+ - Determines what the module does if the image matches, but the image name in the container's configuration
+ does not match the image name provided to the module.
+ - "This is ignored if C(image: ignore) is set in I(comparisons)."
+ - If set to C(recreate) the container will be recreated.
+ - If set to C(ignore) the container will not be recreated because of this. It might still get recreated for other reasons.
+ This has been the default behavior of the module for a long time, but might not be what users expect.
+ type: str
+ choices:
+ - recreate
+ - ignore
+ default: ignore
+ version_added: 3.2.0
+ init:
+ description:
+ - Run an init inside the container that forwards signals and reaps processes.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ interactive:
+ description:
+ - Keep stdin open after a container is launched, even if not attached.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ ipc_mode:
+ description:
+ - Set the IPC mode for the container.
+ - Can be one of C(container:<name|id>) to reuse another container's IPC namespace or C(host) to use
+ the host's IPC namespace within the container.
+ type: str
+ keep_volumes:
+ description:
+ - Retain anonymous volumes associated with a removed container.
+ type: bool
+ default: true
+ kill_signal:
+ description:
+ - Override default signal used to kill a running container.
+ type: str
+ kernel_memory:
+ description:
+ - "Kernel memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ type: dict
+ links:
+ description:
+ - List of name aliases for linked containers in the format C(container_name:alias).
+ - Setting this will force container to be restarted.
+ type: list
+ elements: str
+ log_driver:
+ description:
+ - Specify the logging driver. Docker uses C(json-file) by default.
+ - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices.
+ type: str
+ log_options:
+ description:
+ - Dictionary of options specific to the chosen I(log_driver).
+ - See U(https://docs.docker.com/engine/admin/logging/overview/) for details.
+ - I(log_driver) needs to be specified for I(log_options) to take effect, even if using the default C(json-file) driver.
+ type: dict
+ aliases:
+ - log_opt
+ mac_address:
+ description:
+ - Container MAC address (for example, C(92:d0:c6:0a:29:33)).
+ type: str
+ memory:
+ description:
+ - "Memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C("0").
+ type: str
+ memory_reservation:
+ description:
+ - "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swap:
+ description:
+ - "Total memory limit (memory + swap) in format C(<number>[<unit>]), or
+ the special values C(unlimited) or C(-1) for unlimited swap usage.
+ Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B),
+ C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swappiness:
+ description:
+ - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+ - If not set, the value will remain the same if the container exists and will be inherited
+ from the host machine if it is (re-)created.
+ type: int
+ mounts:
+ type: list
+ elements: dict
+ description:
+ - Specification for mounts to be added to the container. More powerful alternative to I(volumes).
+ suboptions:
+ target:
+ description:
+ - Path inside the container.
+ type: str
+ required: true
+ source:
+ description:
+ - Mount source.
+ - For example, this can be a volume name or a host path.
+ - If not supplied when I(type=volume) an anonymous volume will be created.
+ type: str
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows.
+ type: str
+ choices:
+ - bind
+ - npipe
+ - tmpfs
+ - volume
+ default: volume
+ read_only:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ consistency:
+ description:
+ - The consistency requirement for the mount.
+ type: str
+ choices:
+ - cached
+ - consistent
+ - default
+ - delegated
+ propagation:
+ description:
+ - Propagation mode. Only valid for the C(bind) type.
+ type: str
+ choices:
+ - private
+ - rprivate
+ - shared
+ - rshared
+ - slave
+ - rslave
+ no_copy:
+ description:
+ - False if the volume should be populated with the data from the target. Only valid for the C(volume) type.
+ - The default value is C(false).
+ type: bool
+ labels:
+ description:
+ - User-defined name and labels for the volume. Only valid for the C(volume) type.
+ type: dict
+ volume_driver:
+ description:
+ - Specify the volume driver. Only valid for the C(volume) type.
+ - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: str
+ volume_options:
+ description:
+ - Dictionary of options specific to the chosen volume_driver. See
+ L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: dict
+ tmpfs_size:
+ description:
+ - "The size for the tmpfs mount in bytes in format <number>[<unit>]."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ tmpfs_mode:
+ description:
+ - The permission mode for the tmpfs mount.
+ type: str
+ name:
+ description:
+ - Assign a name to a new container or match an existing container.
+ - When identifying an existing container name may be a name or a long or short container ID.
+ type: str
+ required: true
+ network_mode:
+ description:
+ - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default).
+ - "Since community.docker 2.0.0, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network,
+ the default value for I(network_mode) is the name of the first network in the I(networks) list. You can prevent this
+ by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if
+ I(network_mode) is not specified."
+ type: str
+ userns_mode:
+ description:
+ - Set the user namespace mode for the container. Currently, the only valid values are C(host) and the empty string.
+ type: str
+ networks:
+ description:
+ - List of networks the container belongs to.
+ - For examples of the data structure and usage see EXAMPLES below.
+ - "To remove a container from one or more networks, use C(networks: strict) in the I(comparisons) option."
+ - "If I(networks_cli_compatible) is set to C(false), this will not remove the default network if I(networks) is specified.
+ This is different from the behavior of C(docker run ...). You need to explicitly use C(networks: strict) in I(comparisons)
+ to enforce the removal of the default network (and all other networks not explicitly mentioned in I(networks)) in that case."
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The network's name.
+ type: str
+ required: true
+ ipv4_address:
+ description:
+ - The container's IPv4 address in this network.
+ type: str
+ ipv6_address:
+ description:
+ - The container's IPv6 address in this network.
+ type: str
+ links:
+ description:
+ - A list of containers to link to.
+ type: list
+ elements: str
+ aliases:
+ description:
+ - List of aliases for this container in this network. These names
+ can be used in the network to reach this container.
+ type: list
+ elements: str
+ networks_cli_compatible:
+ description:
+ - "If I(networks_cli_compatible) is set to C(true) (default), this module will behave as
+ C(docker run --network) and will B(not) add the default network if I(networks) is
+ specified. If I(networks) is not specified, the default network will be attached."
+ - "When I(networks_cli_compatible) is set to C(false) and networks are provided to the module
+ via the I(networks) option, the module behaves differently than C(docker run --network):
+ C(docker run --network other) will create a container with network C(other) attached,
+ but the default network not attached. This module with I(networks: {name: other}) will
+ create a container with both C(default) and C(other) attached. If C(networks: strict)
+ or C(*: strict) is set in I(comparisons), the C(default) network will be removed afterwards."
+ type: bool
+ default: true
+ oom_killer:
+ description:
+ - Whether or not to disable OOM Killer for the container.
+ type: bool
+ oom_score_adj:
+ description:
+ - An integer value containing the score given to the container in order to tune
+ OOM killer preferences.
+ type: int
+ output_logs:
+ description:
+ - If set to true, output of the container command will be printed.
+ - Only effective when I(log_driver) is set to C(json-file), C(journald), or C(local).
+ type: bool
+ default: false
+ paused:
+ description:
+ - Use with the started state to pause running processes inside the container.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ pid_mode:
+ description:
+ - Set the PID namespace mode for the container.
+ type: str
+ pids_limit:
+ description:
+ - Set PIDs limit for the container. It accepts an integer value.
+ - Set C(-1) for unlimited PIDs.
+ type: int
+ platform:
+ description:
+ - Platform for the container in the format C(os[/arch[/variant]]).
+ - "Please note that inspecting the container does not always return the exact platform string used to
+ create the container. This can cause idempotency to break for this module. Use the I(comparisons) option
+ with C(platform: ignore) to prevent accidental recreation of the container due to this."
+ type: str
+ version_added: 3.0.0
+ privileged:
+ description:
+ - Give extended privileges to the container.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ publish_all_ports:
+ description:
+ - Publish all ports to the host.
+ - Any specified port bindings from I(published_ports) will remain intact when C(true).
+ type: bool
+ version_added: 1.8.0
+ published_ports:
+ description:
+ - List of ports to publish from the container to the host.
+ - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
+ container port, 9000 is a host port, and 0.0.0.0 is a host interface."
+ - Port ranges can be used for source and destination ports. If two ranges with
+ different lengths are specified, the shorter range will be used.
+ Since community.general 0.2.0, if the source port range has length 1, the port will not be assigned
+ to the first port of the destination range, but to a free port in that range. This is the
+ same behavior as for C(docker) command line utility.
+ - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are B(not) allowed. This
+ is different from the C(docker) command line utility. Use the R(dig lookup,ansible_collections.community.general.dig_lookup)
+ to resolve hostnames."
+ - If I(networks) parameter is provided, will inspect each network to see if there exists
+ a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4).
+ If such a network is found, then published ports where no host IP address is specified
+ will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4).
+ Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4)
+ value encountered in the list of I(networks) is the one that will be used.
+ - The value C(all) was allowed in earlier versions of this module. Support for it was removed in
+ community.docker 3.0.0. Use the I(publish_all_ports) option instead.
+ type: list
+ elements: str
+ aliases:
+ - ports
+ pull:
+ description:
+ - If true, always pull the latest version of an image. Otherwise, will only pull an image
+ when missing.
+ - "B(Note:) images are only pulled when specified by name. If the image is specified
+ as an image ID (hash), it cannot be pulled."
+ type: bool
+ default: false
+ purge_networks:
+ description:
+ - Remove the container from ALL networks not included in I(networks) parameter.
+ - Any default networks such as C(bridge), if not found in I(networks), will be removed as well.
+ - "This option is deprecated since community.docker 3.2.0 and will be removed in community.docker 4.0.0.
+ Use C(networks: strict) in I(comparisons) instead of I(purge_networks=true) and make sure that
+ I(networks) is specified. If you want to remove all networks, specify I(networks: [])."
+ type: bool
+ default: false
+ read_only:
+ description:
+ - Mount the container's root file system as read-only.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ recreate:
+ description:
+ - Use with present and started states to force the re-creation of an existing container.
+ type: bool
+ default: false
+ removal_wait_timeout:
+ description:
+ - When removing an existing container, the docker daemon API call returns after the container
+ is scheduled for removal. Removal usually is very fast, but it can happen that during high I/O
+ load, removal can take longer. By default, the module will wait until the container has been
+ removed before trying to (re-)create it, however long this takes.
+ - By setting this option, the module will wait at most this many seconds for the container to be
+ removed. If the container is still in the removal phase after this many seconds, the module will
+ fail.
+ type: float
+ restart:
+ description:
+ - Use with started state to force a matching container to be stopped and restarted.
+ type: bool
+ default: false
+ restart_policy:
+ description:
+ - Container restart policy.
+ - Place quotes around C(no) option.
+ type: str
+ choices:
+ - 'no'
+ - 'on-failure'
+ - 'always'
+ - 'unless-stopped'
+ restart_retries:
+ description:
+ - Use with restart policy to control maximum number of restart attempts.
+ type: int
+ runtime:
+ description:
+ - Runtime to use for the container.
+ type: str
+ shm_size:
+ description:
+ - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M).
+ type: str
+ security_opts:
+ description:
+ - List of security options in the form of C("label:user:User").
+ type: list
+ elements: str
+ state:
+ description:
+ - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container
+ rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.'
+ - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
+ container matches the name, a container will be created. If a container matches the name but the provided configuration
+ does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
+ with the requested config.'
+ - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running
+ state. Use I(restart) to force a matching container to be stopped and restarted.'
+ - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped
+ state.'
+ - "To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the
+ image version will be taken into account, you can also use the C(image: ignore) in the I(comparisons) option."
+ - Use the I(recreate) option to always force re-creation of a matching container, even if it is running.
+ - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is
+ C(stopped), please use the I(force_kill) option.
+ - Use I(keep_volumes) to retain anonymous volumes associated with a removed container.
+ type: str
+ default: started
+ choices:
+ - absent
+ - present
+ - stopped
+ - started
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ type: str
+ stop_timeout:
+ description:
+ - Number of seconds to wait for the container to stop before sending C(SIGKILL).
+ When the container is created by this module, its C(StopTimeout) configuration
+ will be set to this value.
+ - When the container is stopped, will be used as a timeout for stopping the
+ container. In case the container has a custom C(StopTimeout) configuration,
+ the behavior depends on the version of the docker daemon. New versions of
+ the docker daemon will always use the container's configured C(StopTimeout)
+ value if it has been configured.
+ type: int
+ storage_opts:
+ description:
+ - Storage driver options for this container as a key-value mapping.
+ type: dict
+ version_added: 1.3.0
+ tmpfs:
+ description:
+ - Mount a tmpfs directory.
+ type: list
+ elements: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ ulimits:
+ description:
+ - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)."
+ type: list
+ elements: str
+ sysctls:
+ description:
+ - Dictionary of key,value pairs.
+ type: dict
+ user:
+ description:
+ - Sets the username or UID used and optionally the groupname or GID for the specified command.
+ - "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)."
+ type: str
+ uts:
+ description:
+ - Set the UTS namespace mode for the container.
+ type: str
+ volumes:
+ description:
+ - List of volumes to mount within the container.
+ - "Use docker CLI-style syntax: C(/host:/container[:mode])"
+ - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent),
+ C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and
+ C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes."
+ - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume.
+ - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw),
+ C(z), and C(Z)."
+ type: list
+ elements: str
+ volume_driver:
+ description:
+ - The container volume driver.
+ type: str
+ volumes_from:
+ description:
+ - List of container names or IDs to get volumes from.
+ type: list
+ elements: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ type: str
+
+author:
+ - "Cove Schneider (@cove)"
+ - "Joshua Conner (@joshuaconner)"
+ - "Pavel Antonov (@softzilla)"
+ - "Thomas Steinbach (@ThomasSteinbach)"
+ - "Philippe Jandot (@zfil)"
+ - "Daan Oosterveld (@dusdanig)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Kassian Sun (@kassiansun)"
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Create a data container
+ community.docker.docker_container:
+ name: mydata
+ image: busybox
+ volumes:
+ - /data
+
+- name: Re-create a redis container
+ community.docker.docker_container:
+ name: myredis
+ image: redis
+ command: redis-server --appendonly yes
+ state: present
+ recreate: true
+ exposed_ports:
+ - 6379
+ volumes_from:
+ - mydata
+
+- name: Restart a container
+ community.docker.docker_container:
+ name: myapplication
+ image: someuser/appimage
+ state: started
+ restart: true
+ links:
+ - "myredis:aliasedredis"
+ devices:
+ - "/dev/sda:/dev/xvda:rwm"
+ ports:
+ # Publish container port 9000 as host port 8080
+ - "8080:9000"
+ # Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1
+ - "127.0.0.1:8081:9001/udp"
+ # Publish container port 9002 as a random host port
+ - "9002"
+ # Publish container port 9003 as a free host port in range 8000-8100
+ # (the host port will be selected by the Docker daemon)
+ - "8000-8100:9003"
+ # Publish container ports 9010-9020 to host ports 7000-7010
+ - "7000-7010:9010-9020"
+ env:
+ SECRET_KEY: "ssssh"
+ # Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted
+ BOOLEAN_KEY: "yes"
+
+- name: Container present
+ community.docker.docker_container:
+ name: mycontainer
+ state: present
+ image: ubuntu:14.04
+ command: sleep infinity
+
+- name: Stop a container
+ community.docker.docker_container:
+ name: mycontainer
+ state: stopped
+
+- name: Start 4 load-balanced containers
+ community.docker.docker_container:
+ name: "container{{ item }}"
+ recreate: true
+ image: someuser/anotherappimage
+ command: sleep 1d
+ with_sequence: count=4
+
+- name: Remove container
+ community.docker.docker_container:
+ name: ohno
+ state: absent
+
+- name: Syslogging output
+ community.docker.docker_container:
+ name: myservice
+ image: busybox
+ log_driver: syslog
+ log_options:
+ syslog-address: tcp://my-syslog-server:514
+ syslog-facility: daemon
+ # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag";
+ # for older docker installs, use "syslog-tag" instead
+ tag: myservice
+
+- name: Create db container and connect to network
+ community.docker.docker_container:
+ name: db_test
+ image: "postgres:latest"
+ networks:
+ - name: "{{ docker_network_name }}"
+
+- name: Start container, connect to network and link
+ community.docker.docker_container:
+ name: sleeper
+ image: ubuntu:14.04
+ networks:
+ - name: TestingNet
+ ipv4_address: "172.16.1.100"
+ aliases:
+ - sleepyzz
+ links:
+ - db_test:db
+ - name: TestingNet2
+
+- name: Start a container with a command
+ community.docker.docker_container:
+ name: sleepy
+ image: ubuntu:14.04
+ command: ["sleep", "infinity"]
+
+- name: Add container to networks
+ community.docker.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ ipv4_address: 172.16.1.18
+ links:
+ - sleeper
+ - name: TestingNet2
+ ipv4_address: 172.16.10.20
+
+- name: Update network with aliases
+ community.docker.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ aliases:
+ - sleepyz
+ - zzzz
+
+- name: Remove container from one network
+ community.docker.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet2
+ comparisons:
+ networks: strict
+
+- name: Remove container from all networks
+ community.docker.docker_container:
+ name: sleepy
+ comparisons:
+ networks: strict
+
+- name: Start a container and use an env file
+ community.docker.docker_container:
+ name: agent
+ image: jenkinsci/ssh-slave
+ env_file: /var/tmp/jenkins/agent.env
+
+- name: Create a container with limited capabilities
+ community.docker.docker_container:
+ name: sleepy
+ image: ubuntu:16.04
+ command: sleep infinity
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+
+- name: Finer container restart/update control
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ volumes:
+ - /tmp:/tmp
+ comparisons:
+ image: ignore # do not restart containers with older versions of the image
+ env: strict # we want precisely this environment
+ volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
+
+- name: Finer container restart/update control II
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ comparisons:
+ '*': ignore # by default, ignore *all* options (including image)
+ env: strict # except for environment variables; there, we want to be strict
+
+- name: Start container with healthstatus
+ community.docker.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or timeouts, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Remove healthcheck from container
+ community.docker.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # The "NONE" check needs to be specified
+ test: ["NONE"]
+
+- name: Create a tmpfs with a size and mode
+ community.docker.docker_container:
+ name: tmpfs-test
+ image: ubuntu:22.04
+ state: started
+ mounts:
+ - type: tmpfs
+ target: /cache
+ tmpfs_mode: "1700" # only readable to the owner
+ tmpfs_size: "16G"
+
+- name: Start container with block device read limit
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_read_bps:
+ # Limit read rate for /dev/sda to 20 mebibytes per second
+ - path: /dev/sda
+ rate: 20M
+ device_read_iops:
+ # Limit read rate for /dev/sdb to 300 IO per second
+ - path: /dev/sdb
+ rate: 300
+
+- name: Start container with GPUs
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_requests:
+ - # Add some specific devices to this container
+ device_ids:
+ - '0'
+ - 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+ - # Add nVidia GPUs to this container
+ driver: nvidia
+ count: -1 # this means we want all
+ capabilities:
+ # We have one OR condition: 'gpu' AND 'utility'
+ - - gpu
+ - utility
+ # See https://github.com/NVIDIA/nvidia-container-runtime#supported-driver-capabilities
+ # for a list of capabilities supported by the nvidia driver
+
+- name: Start container with storage options
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ storage_opts:
+ # Limit root filesystem to 12 MB - note that this requires special storage backends
+ # (https://fabianlee.org/2020/01/15/docker-use-overlay2-with-an-xfs-backing-filesystem-to-limit-rootfs-size/)
+ size: 12m
+'''
+
+RETURN = '''
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Empty if I(state) is C(absent).
+ - If I(detach=false), will include C(Output) attribute containing any output from container run.
+ returned: success; or when I(state=started) and I(detach=false), and when waiting for the container result did not fail
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+status:
+ description:
+ - In case a container is started without detaching, this contains the exit code of the process in the container.
+ - Before community.docker 1.1.0, this was only returned when non-zero.
+ returned: when I(state=started) and I(detach=false), and when waiting for the container result did not fail
+ type: int
+ sample: 0
+'''
+
+from ansible_collections.community.docker.plugins.module_utils.module_container.docker_api import (
+ DockerAPIEngineDriver,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.module_container.module import (
+ run_module,
+)
+
+
def main():
    # All of the heavy lifting lives in the shared module_container
    # implementation; this entry point merely selects the Docker API
    # engine driver and hands control over.
    run_module(DockerAPIEngineDriver())


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py b/ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py
new file mode 100644
index 00000000..f140bfe6
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py
@@ -0,0 +1,870 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_copy_into
+
+short_description: Copy a file into a Docker container
+
+version_added: 3.4.0
+
+description:
+ - Copy a file into a Docker container.
+ - Similar to C(docker cp).
+ - To copy files in a non-running container, you must provide the I(owner_id) and I(group_id) options.
+ This is also necessary if the container does not contain a C(/bin/sh) shell with an C(id) tool.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ details:
+ - Additional data will need to be transferred to compute diffs.
+ - The module uses R(the MAX_FILE_SIZE_FOR_DIFF ansible-core configuration,MAX_FILE_SIZE_FOR_DIFF)
+ to determine for how large files diffs should be computed.
+
+options:
+ container:
+ description:
+ - The name of the container to copy files to.
+ type: str
+ required: true
+ path:
+ description:
+ - Path to a file on the managed node.
+ - Mutually exclusive with I(content). One of I(content) and I(path) is required.
+ type: path
+ content:
+ description:
+ - The file's content.
+ - If you plan to provide binary data, provide it pre-encoded to base64, and set I(content_is_b64=true).
+ - Mutually exclusive with I(path). One of I(content) and I(path) is required.
+ type: str
+ content_is_b64:
+ description:
+ - If set to C(true), the content in I(content) is assumed to be Base64 encoded and
+ will be decoded before being used.
+ - To use binary I(content), it is better to keep it Base64 encoded and let it
+ be decoded by this option. Otherwise you risk the data to be interpreted as
+ UTF-8 and corrupted.
+ type: bool
+ default: false
+ container_path:
+ description:
+ - Path to a file inside the Docker container.
+ - Must be an absolute path.
+ type: str
+ required: true
+ follow:
+ description:
+ - This flag indicates that filesystem links in the Docker container, if they exist, should be followed.
+ type: bool
+ default: false
+ local_follow:
+ description:
+ - This flag indicates that filesystem links in the source tree (where the module is executed), if they exist, should be followed.
+ type: bool
+ default: true
+ owner_id:
+ description:
+ - The owner ID to use when writing the file to disk.
+ - If provided, I(group_id) must also be provided.
+ - If not provided, the module will try to determine the user and group ID for the current user in the container.
+ This will only work if C(/bin/sh) is present in the container and the C(id) binary or shell builtin is available.
+ Also the container must be running.
+ type: int
+ group_id:
+ description:
+ - The group ID to use when writing the file to disk.
+ - If provided, I(owner_id) must also be provided.
+ - If not provided, the module will try to determine the user and group ID for the current user in the container.
+ This will only work if C(/bin/sh) is present in the container and the C(id) binary or shell builtin is available.
+ Also the container must be running.
+ type: int
+ mode:
+ description:
+ - The file mode to use when writing the file to disk.
+ - Will use the file's mode from the source system if this option is not provided.
+ type: int
+ force:
+ description:
+ - If set to C(true), force writing the file (without performing any idempotency checks).
+ - If set to C(false), only write the file if it does not exist on the target. If a filesystem object exists at
+ the destination, the module will not do any change.
+ - If this option is not specified, the module will be idempotent. To verify idempotency, it will try to get information
+ on the filesystem object in the container, and if everything seems to match will download the file from the container
+ to compare it to the file to upload.
+ type: bool
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Copy a file into the container
+ community.docker.docker_container_copy_into:
+ container: mydata
+ path: /home/user/data.txt
+ container_path: /data/input.txt
+
+- name: Copy a file into the container with owner, group, and mode set
+ community.docker.docker_container_copy_into:
+ container: mydata
+ path: /home/user/bin/runme.o
+ container_path: /bin/runme
+ owner_id: 0 # root
+ group_id: 0 # root
+ mode: 0o755 # readable and executable by all users, writable by root
+'''
+
+RETURN = '''
+container_path:
+ description:
+ - The actual path in the container.
+ - Can only be different from I(container_path) when I(follow=true).
+ type: str
+ returned: success
+'''
+
+import base64
+import io
+import os
+import stat
+import traceback
+
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.copy import (
+ DockerFileCopyError,
+ DockerFileNotFound,
+ DockerUnexpectedError,
+ determine_user_group,
+ fetch_file_ex,
+ put_file,
+ put_file_content,
+ stat_file,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._scramble import generate_insecure_key, scramble
+
+
def are_fileobjs_equal(f1, f2):
    '''Compare the contents of two (buffered) file objects chunk by chunk.

    Returns True when both yield identical byte streams, False otherwise.
    Both objects may produce their data in differently sized reads.
    '''
    chunk_size = 65536
    pending_a = b''
    pending_b = b''
    while True:
        # Top up each buffer while its source still has data.
        if f1 is not None and len(pending_a) < chunk_size:
            data = f1.read(chunk_size)
            if not data:
                # EOF on the first stream; stop reading from it.
                f1 = None
            pending_a += data
        if f2 is not None and len(pending_b) < chunk_size:
            data = f2.read(chunk_size)
            if not data:
                # EOF on the second stream; stop reading from it.
                f2 = None
            pending_b += data
        if not pending_a or not pending_b:
            # At least one side is exhausted. The streams are equal exactly
            # when both are exhausted with no leftover bytes.
            return not pending_a and not pending_b
        # Compare the overlapping prefix and discard it from both buffers.
        common = min(len(pending_a), len(pending_b))
        if pending_a[:common] != pending_b[:common]:
            return False
        pending_a = pending_a[common:]
        pending_b = pending_b[common:]
+
+
def are_fileobjs_equal_read_first(f1, f2):
    '''Compare two (buffered) file objects while capturing the first one.

    Returns a tuple ``(is_equal, content_of_f1)``: whether both objects have
    the same content, and the complete content of the first file object.
    '''
    chunk_size = 65536
    buf_a = b''
    buf_b = b''
    equal = True
    pieces = []
    while True:
        # Keep both buffers topped up while their sources have data.
        if f1 is not None and len(buf_a) < chunk_size:
            data = f1.read(chunk_size)
            if not data:
                # EOF on the first stream.
                f1 = None
            buf_a += data
        if f2 is not None and len(buf_b) < chunk_size:
            data = f2.read(chunk_size)
            if not data:
                # EOF on the second stream.
                f2 = None
            buf_b += data
        if not buf_a or not buf_b:
            # At least one stream is fully consumed; they are equal exactly
            # when neither has leftover bytes.
            equal = not buf_a and not buf_b
            break
        # Compare the overlapping prefix; remember it as part of f1's content.
        step = min(len(buf_a), len(buf_b))
        if buf_a[:step] != buf_b[:step]:
            equal = False
            break
        pieces.append(buf_a[:step])
        buf_a = buf_a[step:]
        buf_b = buf_b[step:]

    # The first stream must be returned in full: whatever is still buffered
    # plus anything that has not been read yet.
    pieces.append(buf_a)
    if f1 is not None:
        pieces.append(f1.read())

    return equal, b''.join(pieces)
+
+
def is_container_file_not_regular_file(container_stat):
    '''Return True if the stat result describes anything but a regular file.

    ``container_stat['mode']`` is a Go ``io/fs.FileMode`` value, which keeps
    its file-type flags in the topmost bits (bit 31 is ModeDir).
    '''
    special_type_bits = (
        32 - 1,   # ModeDir
        32 - 4,   # ModeTemporary
        32 - 5,   # ModeSymlink
        32 - 6,   # ModeDevice
        32 - 7,   # ModeNamedPipe
        32 - 8,   # ModeSocket
        32 - 11,  # ModeCharDevice
        32 - 13,  # ModeIrregular
    )
    return any(container_stat['mode'] & (1 << bit) for bit in special_type_bits)
+
+
def get_container_file_mode(container_stat):
    '''Convert a Go ``io/fs.FileMode`` value into a POSIX file mode.

    The lower 12 bits carry the permission bits directly; Go stores the
    setuid/setgid/sticky flags in dedicated high bits which must be mapped
    back onto their POSIX equivalents.
    '''
    raw_mode = container_stat['mode']
    posix_mode = raw_mode & 0xFFF
    for go_flag, posix_flag in (
        (1 << (32 - 9), stat.S_ISUID),   # ModeSetuid
        (1 << (32 - 10), stat.S_ISGID),  # ModeSetgid
        (1 << (32 - 12), stat.S_ISVTX),  # ModeSticky
    ):
        if raw_mode & go_flag:
            posix_mode |= posix_flag
    return posix_mode
+
+
def add_other_diff(diff, in_path, member):
    '''Describe a non-regular tar member on the 'before' side of the diff.

    ``member`` is a ``tarfile.TarInfo``; its type determines the placeholder
    text. Does nothing when no diff collection was requested.
    '''
    if diff is None:
        return
    diff['before_header'] = in_path
    if member.issym() or member.islnk():
        # Links are described by their target rather than a type label.
        diff['before'] = member.linkname
        return
    if member.isfile():
        # This helper must only be called for non-regular members.
        raise DockerUnexpectedError('should not be a regular file')
    # The specific device checks must precede the generic isdev() check,
    # which also matches character/block devices and fifos.
    for predicate, label in (
        (member.isdir, '(directory)'),
        (member.ischr, '(character device)'),
        (member.isblk, '(block device)'),
        (member.isfifo, '(fifo)'),
        (member.isdev, '(device)'),
    ):
        if predicate():
            diff['before'] = label
            return
    diff['before'] = '(unknown filesystem object)'
+
+
def retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat=None, link_target=None):
    '''Record the container file's current state on the 'before' side of the diff.

    When ``regular_stat`` (a dict with Go FileMode ``mode`` and ``size``) is
    provided, non-regular filesystem objects and oversized files are described
    without fetching them; otherwise the file content is retrieved from the
    container. Does nothing when no diff collection was requested.
    '''
    if diff is None:
        return
    if regular_stat is not None:
        # Non-regular filesystem objects get a short textual label instead of
        # their content. The Go FileMode type bits are checked in the same
        # order as before; a label of None marks the symlink case, which is
        # described by its target.
        for bit, label in (
            (32 - 1, '(directory)'),
            (32 - 4, '(temporary file)'),
            (32 - 5, None),  # symlink
            (32 - 6, '(device)'),
            (32 - 7, '(named pipe)'),
            (32 - 8, '(socket)'),
            (32 - 11, '(character device)'),
            (32 - 13, '(unknown filesystem object)'),
        ):
            if regular_stat['mode'] & (1 << bit):
                diff['before_header'] = container_path
                diff['before'] = link_target if label is None else label
                return
        # Regular file: skip content retrieval when it exceeds the size limit.
        if regular_stat['size'] > max_file_size_for_diff > 0:
            diff['dst_larger'] = max_file_size_for_diff
            return

    # We need to fetch the file from the container to get hold of its content.
    def process_none(in_path):
        diff['before'] = ''

    def process_regular(in_path, tar, member):
        add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)

    def process_symlink(in_path, member):
        diff['before_header'] = in_path
        diff['before'] = member.linkname

    def process_other(in_path, member):
        add_other_diff(diff, in_path, member)

    fetch_file_ex(
        client,
        container,
        in_path=container_path,
        process_none=process_none,
        process_regular=process_regular,
        process_symlink=process_symlink,
        process_other=process_other,
        follow_links=follow_links,
    )
+
+
def is_binary(content):
    '''Heuristically decide whether the given bytes are binary data.

    Data containing a NUL byte is treated as binary. This mirrors
    ansible-core's own detection, which also just checks for 0x00 (and only
    within the first 8k), so the simple heuristic is comparable.
    TODO: a better detection scheme could be used.
    '''
    return b'\x00' in content
+
+
def are_fileobjs_equal_with_diff_of_first(f1, f2, size, diff, max_file_size_for_diff, container_path):
    '''Compare two file objects, recording f1's content on the diff's 'before' side.

    When no diff was requested, or the file exceeds the configured diff size
    limit, a plain comparison without content capture is performed instead.
    Returns True when both file objects have identical content.
    '''
    capture_content = diff is not None and not (size > max_file_size_for_diff > 0)
    if not capture_content:
        if diff is not None:
            # File too large for diffing; only record that fact.
            diff['dst_larger'] = max_file_size_for_diff
        return are_fileobjs_equal(f1, f2)
    equal, data = are_fileobjs_equal_read_first(f1, f2)
    if is_binary(data):
        diff['dst_binary'] = 1
    else:
        diff['before_header'] = container_path
        diff['before'] = to_text(data)
    return equal
+
+
def add_diff_dst_from_regular_member(diff, max_file_size_for_diff, container_path, tar, member):
    '''Record a regular tar member's content on the diff's 'before' side.

    Oversized files are only flagged via ``dst_larger``; binary content is
    flagged via ``dst_binary`` instead of being embedded. Does nothing when no
    diff collection was requested.
    '''
    if diff is None:
        return
    if member.size > max_file_size_for_diff > 0:
        diff['dst_larger'] = max_file_size_for_diff
        return

    # Note: in Python 2 the extracted file object *cannot* be used as a
    # context manager, hence no `with` here.
    reader = tar.extractfile(member)
    data = reader.read()
    if is_binary(data):
        diff['dst_binary'] = 1
    else:
        diff['before_header'] = container_path
        diff['before'] = to_text(data)
+
+
def copy_dst_to_src(diff):
    '''Mirror the destination-side diff entries onto the source side.

    For each mapped pair, the source-side key is overwritten with the
    destination-side value; when the destination-side key is absent, any stale
    source-side entry is removed. Does nothing when ``diff`` is None.
    '''
    if diff is None:
        return
    key_pairs = (
        ('dst_size', 'src_size'),
        ('dst_binary', 'src_binary'),
        ('before_header', 'after_header'),
        ('before', 'after'),
    )
    for dst_key, src_key in key_pairs:
        if dst_key in diff:
            diff[src_key] = diff[dst_key]
        else:
            # Remove a stale target entry when its source is absent.
            diff.pop(src_key, None)
+
+
def is_file_idempotent(client, container, managed_path, container_path, follow_links, local_follow_links, owner_id, group_id, mode,
                       force=False, diff=None, max_file_size_for_diff=1):
    """Determine whether copying ``managed_path`` into the container can be skipped.

    :param client: Docker API client (also provides module utilities).
    :param container: Container ID or name.
    :param managed_path: Path of the source file on the managed node.
    :param container_path: Destination path inside the container.
    :param follow_links: Whether to resolve symlinks inside the container.
    :param local_follow_links: Whether to resolve symlinks on the managed node.
    :param owner_id: Expected owner UID of the destination file.
    :param group_id: Expected group GID of the destination file.
    :param mode: Expected file mode; derived from the local file if ``None``.
    :param force: ``True`` = always copy; ``False`` = copy only if the destination
        is missing; ``None`` (default) = perform full idempotency checks.
    :param diff: Optional dict collecting before/after diff information.
    :param max_file_size_for_diff: Largest size for which content diffs are computed.
    :returns: Tuple ``(container_path, mode, idempotent)``; ``idempotent`` is True
        when the container already contains an identical file and no copy is needed.
    :raises DockerFileNotFound: if the local file does not exist.
    :raises DockerFileCopyError: if the local path is neither a regular file nor a symlink.
    """
    # Retrieve information of local file
    try:
        file_stat = os.stat(managed_path) if local_follow_links else os.lstat(managed_path)
    except OSError as exc:
        if exc.errno == 2:  # ENOENT
            raise DockerFileNotFound('Cannot find local file {managed_path}'.format(managed_path=managed_path))
        raise
    if mode is None:
        mode = stat.S_IMODE(file_stat.st_mode)
    if not stat.S_ISLNK(file_stat.st_mode) and not stat.S_ISREG(file_stat.st_mode):
        # Bug fix: the placeholder was previously not substituted, so the error
        # message contained the literal text '{managed_path}'.
        raise DockerFileCopyError('Local path {managed_path} is not a symbolic link or file'.format(managed_path=managed_path))

    if diff is not None:
        if file_stat.st_size > max_file_size_for_diff > 0:
            diff['src_larger'] = max_file_size_for_diff
        elif stat.S_ISLNK(file_stat.st_mode):
            diff['after_header'] = managed_path
            diff['after'] = os.readlink(managed_path)
        else:
            with open(managed_path, 'rb') as f:
                content = f.read()
            if is_binary(content):
                diff['src_binary'] = 1
            else:
                diff['after_header'] = managed_path
                diff['after'] = to_text(content)

    # When forcing and we're not following links in the container, go!
    if force and not follow_links:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff)
        return container_path, mode, False

    # Resolve symlinks in the container (if requested), and get information on container's file
    real_container_path, regular_stat, link_target = stat_file(
        client,
        container,
        in_path=container_path,
        follow_links=follow_links,
    )

    # Follow links in the Docker container?
    if follow_links:
        container_path = real_container_path

    # If the file wasn't found, continue
    if regular_stat is None:
        if diff is not None:
            diff['before_header'] = container_path
            diff['before'] = ''
        return container_path, mode, False

    # When forcing, go!
    if force:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False

    # If force is set to False, and the destination exists, assume there's nothing to do
    if force is False:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        copy_dst_to_src(diff)
        return container_path, mode, True

    # Basic idempotency checks
    if stat.S_ISLNK(file_stat.st_mode):
        if link_target is None:
            retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
            return container_path, mode, False
        local_link_target = os.readlink(managed_path)
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, local_link_target == link_target
    if link_target is not None:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if is_container_file_not_regular_file(regular_stat):
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if file_stat.st_size != regular_stat['size']:
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False
    if mode != get_container_file_mode(regular_stat):
        retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
        return container_path, mode, False

    # Fetch file from container

    def process_none(in_path):
        # Destination vanished between stat and fetch: copy is needed.
        return container_path, mode, False

    def process_regular(in_path, tar, member):
        # Check things like user/group ID and mode
        if any([
            member.mode & 0xFFF != mode,
            member.uid != owner_id,
            member.gid != group_id,
            not stat.S_ISREG(file_stat.st_mode),
            member.size != file_stat.st_size,
        ]):
            add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)
            return container_path, mode, False

        tar_f = tar.extractfile(member)  # in Python 2, this *cannot* be used in `with`...
        with open(managed_path, 'rb') as local_f:
            is_equal = are_fileobjs_equal_with_diff_of_first(tar_f, local_f, member.size, diff, max_file_size_for_diff, in_path)
        return container_path, mode, is_equal

    def process_symlink(in_path, member):
        if diff is not None:
            diff['before_header'] = in_path
            diff['before'] = member.linkname

        # Check things like user/group ID and mode
        if member.mode & 0xFFF != mode:
            return container_path, mode, False
        if member.uid != owner_id:
            return container_path, mode, False
        if member.gid != group_id:
            return container_path, mode, False

        if not stat.S_ISLNK(file_stat.st_mode):
            return container_path, mode, False

        local_link_target = os.readlink(managed_path)
        return container_path, mode, member.linkname == local_link_target

    def process_other(in_path, member):
        add_other_diff(diff, in_path, member)
        return container_path, mode, False

    return fetch_file_ex(
        client,
        container,
        in_path=container_path,
        process_none=process_none,
        process_regular=process_regular,
        process_symlink=process_symlink,
        process_other=process_other,
        follow_links=follow_links,
    )
+
+
def copy_file_into_container(client, container, managed_path, container_path, follow_links, local_follow_links,
                             owner_id, group_id, mode, force=False, diff=False, max_file_size_for_diff=1):
    '''Copy a local file into the container unless it is already there, then exit the module.

    Performs the idempotency check, uploads the file when (and only when) a
    difference was found and check mode is off, and calls ``exit_json`` with
    the resulting path, changed flag and optional diff.
    '''
    # Turn the boolean diff request into a dict collector (None disables collection).
    diff_collector = {} if diff else None

    container_path, mode, idempotent = is_file_idempotent(
        client,
        container,
        managed_path,
        container_path,
        follow_links,
        local_follow_links,
        owner_id,
        group_id,
        mode,
        force=force,
        diff=diff_collector,
        max_file_size_for_diff=max_file_size_for_diff,
    )
    changed = not idempotent

    if changed and not client.module.check_mode:
        put_file(
            client,
            container,
            in_path=managed_path,
            out_path=container_path,
            user_id=owner_id,
            group_id=group_id,
            mode=mode,
            follow_links=local_follow_links,
        )

    result = dict(
        container_path=container_path,
        changed=changed,
    )
    if diff_collector:
        # Only attach the diff when something was actually recorded.
        result['diff'] = diff_collector
    client.module.exit_json(**result)
+
+
+def is_content_idempotent(client, container, content, container_path, follow_links, owner_id, group_id, mode,
+ force=False, diff=None, max_file_size_for_diff=1):
+ if diff is not None:
+ if len(content) > max_file_size_for_diff > 0:
+ diff['src_larger'] = max_file_size_for_diff
+ elif is_binary(content):
+ diff['src_binary'] = 1
+ else:
+ diff['after_header'] = 'dynamically generated'
+ diff['after'] = to_text(content)
+
+ # When forcing and we're not following links in the container, go!
+ if force and not follow_links:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff)
+ return container_path, mode, False
+
+ # Resolve symlinks in the container (if requested), and get information on container's file
+ real_container_path, regular_stat, link_target = stat_file(
+ client,
+ container,
+ in_path=container_path,
+ follow_links=follow_links,
+ )
+
+ # Follow links in the Docker container?
+ if follow_links:
+ container_path = real_container_path
+
+ # If the file wasn't found, continue
+ if regular_stat is None:
+ if diff is not None:
+ diff['before_header'] = container_path
+ diff['before'] = ''
+ return container_path, mode, False
+
+ # When forcing, go!
+ if force:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+
+ # If force is set to False, and the destination exists, assume there's nothing to do
+ if force is False:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ copy_dst_to_src(diff)
+ return container_path, mode, True
+
+ # Basic idempotency checks
+ if link_target is not None:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+ if is_container_file_not_regular_file(regular_stat):
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+ if len(content) != regular_stat['size']:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+ if mode != get_container_file_mode(regular_stat):
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+
+ # Fetch file from container
+ def process_none(in_path):
+ if diff is not None:
+ diff['before'] = ''
+ return container_path, mode, False
+
+ def process_regular(in_path, tar, member):
+ # Check things like user/group ID and mode
+ if any([
+ member.mode & 0xFFF != mode,
+ member.uid != owner_id,
+ member.gid != group_id,
+ member.size != len(content),
+ ]):
+ add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)
+ return container_path, mode, False
+
+ tar_f = tar.extractfile(member) # in Python 2, this *cannot* be used in `with`...
+ is_equal = are_fileobjs_equal_with_diff_of_first(tar_f, io.BytesIO(content), member.size, diff, max_file_size_for_diff, in_path)
+ return container_path, mode, is_equal
+
+ def process_symlink(in_path, member):
+ if diff is not None:
+ diff['before_header'] = in_path
+ diff['before'] = member.linkname
+
+ return container_path, mode, False
+
+ def process_other(in_path, member):
+ add_other_diff(diff, in_path, member)
+ return container_path, mode, False
+
+ return fetch_file_ex(
+ client,
+ container,
+ in_path=container_path,
+ process_none=process_none,
+ process_regular=process_regular,
+ process_symlink=process_symlink,
+ process_other=process_other,
+ follow_links=follow_links,
+ )
+
+
+def copy_content_into_container(client, container, content, container_path, follow_links,
+ owner_id, group_id, mode, force=False, diff=False, max_file_size_for_diff=1):
+ if diff:
+ diff = {}
+ else:
+ diff = None
+
+ container_path, mode, idempotent = is_content_idempotent(
+ client,
+ container,
+ content,
+ container_path,
+ follow_links,
+ owner_id,
+ group_id,
+ mode,
+ force=force,
+ diff=diff,
+ max_file_size_for_diff=max_file_size_for_diff,
+ )
+ changed = not idempotent
+
+ if changed and not client.module.check_mode:
+ put_file_content(
+ client,
+ container,
+ content=content,
+ out_path=container_path,
+ user_id=owner_id,
+ group_id=group_id,
+ mode=mode,
+ )
+
+ result = dict(
+ container_path=container_path,
+ changed=changed,
+ )
+ if diff:
+ # Since the content is no_log, make sure that the before/after strings look sufficiently different
+ key = generate_insecure_key()
+ diff['scrambled_diff'] = base64.b64encode(key)
+ for k in ('before', 'after'):
+ if k in diff:
+ diff[k] = scramble(diff[k], key)
+ result['diff'] = diff
+ client.module.exit_json(**result)
+
+
+def main():
+ argument_spec = dict(
+ container=dict(type='str', required=True),
+ path=dict(type='path'),
+ container_path=dict(type='str', required=True),
+ follow=dict(type='bool', default=False),
+ local_follow=dict(type='bool', default=True),
+ owner_id=dict(type='int'),
+ group_id=dict(type='int'),
+ mode=dict(type='int'),
+ force=dict(type='bool'),
+ content=dict(type='str', no_log=True),
+ content_is_b64=dict(type='bool', default=False),
+
+ # Undocumented parameters for use by the action plugin
+ _max_file_size_for_diff=dict(type='int'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ min_docker_api_version='1.20',
+ supports_check_mode=True,
+ mutually_exclusive=[('path', 'content')],
+ required_together=[('owner_id', 'group_id')],
+ required_by={
+ 'content': ['mode'],
+ },
+ )
+
+ container = client.module.params['container']
+ managed_path = client.module.params['path']
+ container_path = client.module.params['container_path']
+ follow = client.module.params['follow']
+ local_follow = client.module.params['local_follow']
+ owner_id = client.module.params['owner_id']
+ group_id = client.module.params['group_id']
+ mode = client.module.params['mode']
+ force = client.module.params['force']
+ content = client.module.params['content']
+ max_file_size_for_diff = client.module.params['_max_file_size_for_diff'] or 1
+
+ if content is not None:
+ if client.module.params['content_is_b64']:
+ try:
+ content = base64.b64decode(content)
+ except Exception as e: # depending on Python version and error, multiple different exceptions can be raised
+ client.fail('Cannot Base64 decode the content option: {0}'.format(e))
+ else:
+ content = to_bytes(content)
+
+ if not container_path.startswith(os.path.sep):
+ container_path = os.path.join(os.path.sep, container_path)
+ container_path = os.path.normpath(container_path)
+
+ try:
+ if owner_id is None or group_id is None:
+ owner_id, group_id = determine_user_group(client, container)
+
+ if content is not None:
+ copy_content_into_container(
+ client,
+ container,
+ content,
+ container_path,
+ follow_links=follow,
+ owner_id=owner_id,
+ group_id=group_id,
+ mode=mode,
+ force=force,
+ diff=client.module._diff,
+ max_file_size_for_diff=max_file_size_for_diff,
+ )
+ elif managed_path is not None:
+ copy_file_into_container(
+ client,
+ container,
+ managed_path,
+ container_path,
+ follow_links=follow,
+ local_follow_links=local_follow,
+ owner_id=owner_id,
+ group_id=group_id,
+ mode=mode,
+ force=force,
+ diff=client.module._diff,
+ max_file_size_for_diff=max_file_size_for_diff,
+ )
+ else:
+ # Can happen if a user explicitly passes `content: null` or `path: null`...
+ client.fail('One of path and content must be supplied')
+ except NotFound as exc:
+ client.fail('Could not find container "{1}" or resource in it ({0})'.format(exc, container))
+ except APIError as exc:
+ client.fail('An unexpected Docker error occurred for container "{1}": {0}'.format(exc, container), exception=traceback.format_exc())
+ except DockerException as exc:
+ client.fail('An unexpected Docker error occurred for container "{1}": {0}'.format(exc, container), exception=traceback.format_exc())
+ except RequestException as exc:
+ client.fail(
+ 'An unexpected requests error occurred for container "{1}" when trying to talk to the Docker daemon: {0}'.format(exc, container),
+ exception=traceback.format_exc())
+ except DockerUnexpectedError as exc:
+ client.fail('Unexpected error: {exc}'.format(exc=to_native(exc)), exception=traceback.format_exc())
+ except DockerFileCopyError as exc:
+ client.fail(to_native(exc))
+ except OSError as exc:
+ client.fail('Unexpected error: {exc}'.format(exc=to_native(exc)), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_container_exec.py b/ansible_collections/community/docker/plugins/modules/docker_container_exec.py
new file mode 100644
index 00000000..95fde4f7
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_container_exec.py
@@ -0,0 +1,305 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_exec
+
+short_description: Execute command in a docker container
+
+version_added: 1.5.0
+
+description:
+ - Executes a command in a Docker container.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ container:
+ type: str
+ required: true
+ description:
+ - The name of the container to execute the command in.
+ argv:
+ type: list
+ elements: str
+ description:
+ - The command to execute.
+ - Since this is a list of arguments, no quoting is needed.
+ - Exactly one of I(argv) and I(command) must be specified.
+ command:
+ type: str
+ description:
+ - The command to execute.
+ - Exactly one of I(argv) and I(command) must be specified.
+ chdir:
+ type: str
+ description:
+ - The directory to run the command in.
+ detach:
+ description:
+ - Whether to run the command synchronously (I(detach=false), default) or asynchronously (I(detach=true)).
+ - If set to C(true), I(stdin) cannot be provided, and the return values C(stdout), C(stderr) and
+ C(rc) are not returned.
+ type: bool
+ default: false
+ version_added: 2.1.0
+ user:
+ type: str
+ description:
+ - If specified, the user to execute this command with.
+ stdin:
+ type: str
+ description:
+ - Set the stdin of the command directly to the specified value.
+ - Can only be used if I(detach=false).
+ stdin_add_newline:
+ type: bool
+ default: true
+ description:
+ - If set to C(true), appends a newline to I(stdin).
+ strip_empty_ends:
+ type: bool
+ default: true
+ description:
+ - Strip empty lines from the end of stdout/stderr in result.
+ tty:
+ type: bool
+ default: false
+ description:
+ - Whether to allocate a TTY.
+ env:
+ description:
+ - Dictionary of environment variables with their respective values to be passed to the command run inside the container.
+ - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example C("true")) in order to avoid data loss.
+ - Please note that if you are passing values in with Jinja2 templates, like C("{{ value }}"), you need to add C(| string) to prevent Ansible from
+ converting strings such as C("true") back to booleans. The correct way is to use C("{{ value | string }}").
+ type: dict
+ version_added: 2.1.0
+
+notes:
+ - Does not support C(check_mode).
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Run a simple command (command)
+ community.docker.docker_container_exec:
+ container: foo
+ command: /bin/bash -c "ls -lah"
+ chdir: /root
+ register: result
+
+- name: Print stdout
+ debug:
+ var: result.stdout
+
+- name: Run a simple command (argv)
+ community.docker.docker_container_exec:
+ container: foo
+ argv:
+ - /bin/bash
+ - "-c"
+ - "ls -lah > /dev/stderr"
+ chdir: /root
+ register: result
+
+- name: Print stderr lines
+ debug:
+ var: result.stderr_lines
+'''
+
+RETURN = '''
+stdout:
+ type: str
+ returned: success and I(detach=false)
+ description:
+ - The standard output of the container command.
+stderr:
+ type: str
+ returned: success and I(detach=false)
+ description:
+ - The standard error output of the container command.
+rc:
+ type: int
+ returned: success and I(detach=false)
+ sample: 0
+ description:
+ - The exit code of the command.
+exec_id:
+ type: str
+ returned: success and I(detach=true)
+ sample: 249d9e3075655baf705ed8f40488c5e9434049cf3431976f1bfdb73741c574c5
+ description:
+ - The execution ID of the command.
+ version_added: 2.1.0
+'''
+
+import shlex
+import traceback
+
+from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native
+from ansible.module_utils.compat import selectors
+from ansible.module_utils.six import string_types
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.socket_handler import (
+ DockerSocketHandlerModule,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import format_environment
+
+
+def main():
+ argument_spec = dict(
+ container=dict(type='str', required=True),
+ argv=dict(type='list', elements='str'),
+ command=dict(type='str'),
+ chdir=dict(type='str'),
+ detach=dict(type='bool', default=False),
+ user=dict(type='str'),
+ stdin=dict(type='str'),
+ stdin_add_newline=dict(type='bool', default=True),
+ strip_empty_ends=dict(type='bool', default=True),
+ tty=dict(type='bool', default=False),
+ env=dict(type='dict'),
+ )
+
+ option_minimal_versions = dict(
+ chdir=dict(docker_api_version='1.35'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ option_minimal_versions=option_minimal_versions,
+ mutually_exclusive=[('argv', 'command')],
+ required_one_of=[('argv', 'command')],
+ )
+
+ container = client.module.params['container']
+ argv = client.module.params['argv']
+ command = client.module.params['command']
+ chdir = client.module.params['chdir']
+ detach = client.module.params['detach']
+ user = client.module.params['user']
+ stdin = client.module.params['stdin']
+ strip_empty_ends = client.module.params['strip_empty_ends']
+ tty = client.module.params['tty']
+ env = client.module.params['env']
+
+ if env is not None:
+ for name, value in list(env.items()):
+ if not isinstance(value, string_types):
+ client.module.fail_json(
+ msg="Non-string value found for env option. Ambiguous env options must be "
+ "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
+ env[name] = to_text(value, errors='surrogate_or_strict')
+
+ if command is not None:
+ argv = shlex.split(command)
+
+ if detach and stdin is not None:
+ client.module.fail_json(msg='If detach=true, stdin cannot be provided.')
+
+ if stdin is not None and client.module.params['stdin_add_newline']:
+ stdin += '\n'
+
+ try:
+ data = {
+ 'Container': container,
+ 'User': user or '',
+ 'Privileged': False,
+ 'Tty': False,
+ 'AttachStdin': bool(stdin),
+ 'AttachStdout': True,
+ 'AttachStderr': True,
+ 'Cmd': argv,
+ 'Env': format_environment(env) if env is not None else None,
+ }
+ if chdir is not None:
+ data['WorkingDir'] = chdir
+
+ exec_data = client.post_json_to_json('/containers/{0}/exec', container, data=data)
+ exec_id = exec_data['Id']
+
+ data = {
+ 'Tty': tty,
+ 'Detach': detach,
+ }
+ if detach:
+ client.post_json_to_text('/exec/{0}/start', exec_id, data=data)
+ client.module.exit_json(changed=True, exec_id=exec_id)
+
+ else:
+ if stdin and not detach:
+ exec_socket = client.post_json_to_stream_socket('/exec/{0}/start', exec_id, data=data)
+ try:
+ with DockerSocketHandlerModule(exec_socket, client.module, selectors) as exec_socket_handler:
+ if stdin:
+ exec_socket_handler.write(to_bytes(stdin))
+
+ stdout, stderr = exec_socket_handler.consume()
+ finally:
+ exec_socket.close()
+ else:
+ stdout, stderr = client.post_json_to_stream('/exec/{0}/start', exec_id, data=data, stream=False, tty=tty, demux=True)
+
+ result = client.get_json('/exec/{0}/json', exec_id)
+
+ stdout = to_text(stdout or b'')
+ stderr = to_text(stderr or b'')
+ if strip_empty_ends:
+ stdout = stdout.rstrip('\r\n')
+ stderr = stderr.rstrip('\r\n')
+
+ client.module.exit_json(
+ changed=True,
+ stdout=stdout,
+ stderr=stderr,
+ rc=result.get('ExitCode') or 0,
+ )
+ except NotFound:
+ client.fail('Could not find container "{0}"'.format(container))
+ except APIError as e:
+ if e.response is not None and e.response.status_code == 409:
+ client.fail('The container "{0}" has been paused ({1})'.format(container, to_native(e)))
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_container_info.py b/ansible_collections/community/docker/plugins/modules/docker_container_info.py
new file mode 100644
index 00000000..bfc28156
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_container_info.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_info
+
+short_description: Retrieves facts about docker container
+
+description:
+ - Retrieves facts about a docker container.
+ - Essentially returns the output of C(docker inspect <name>), similar to what M(community.docker.docker_container)
+ returns for a non-absent container.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - The name of the container to inspect.
+ - When identifying an existing container name may be a name or a long or short container ID.
+ type: str
+ required: true
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get infos on container
+ community.docker.docker_container_info:
+ name: mydata
+ register: result
+
+- name: Does container exist?
+ ansible.builtin.debug:
+ msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about container
+ ansible.builtin.debug:
+ var: result.container
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the container exists.
+ type: bool
+ returned: always
+ sample: true
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Will be C(none) if container does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ container = client.get_container(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=(True if container else False),
+ container=container,
+ )
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_host_info.py b/ansible_collections/community/docker/plugins/modules/docker_host_info.py
new file mode 100644
index 00000000..63d235e8
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_host_info.py
@@ -0,0 +1,383 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_host_info
+
+short_description: Retrieves facts about docker host and lists of objects of the services.
+
+description:
+ - Retrieves facts about a docker host.
+ - Essentially returns the output of C(docker system info).
+ - The module also allows listing object names for containers, images, networks and volumes.
+ It also allows querying information on disk usage.
+ - The output differs depending on the API version of the docker daemon.
+ - If the docker daemon cannot be contacted or does not meet the API version requirements,
+ the module will fail.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+
+options:
+ containers:
+ description:
+ - Whether to list containers.
+ type: bool
+ default: false
+ containers_all:
+ description:
+ - By default, only running containers are returned.
+ - This corresponds to the C(--all) option to C(docker container list).
+ type: bool
+ default: false
+ version_added: 3.4.0
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to list.
+ - "For example, C(until: 24h)."
+ - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
+ C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to list images.
+ type: bool
+ default: false
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to list.
+ - "For example, C(dangling: true)."
+ - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
+ C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to list networks.
+ type: bool
+ default: false
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to list.
+ - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
+ C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to list volumes.
+ type: bool
+ default: false
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to list.
+ - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
+ C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ disk_usage:
+ description:
+ - Summary information on used disk space by all Docker layers.
+ - The output is a sum of images, volumes, containers and build cache.
+ type: bool
+ default: false
+ verbose_output:
+ description:
+ - When set to C(true) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(true)
+ then output will contain verbose information about objects matching the full output of API method.
+ For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
+ - The verbose output in this module contains only subset of information returned by I(_info) module
+ for each type of the objects.
+ type: bool
+ default: false
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get info on docker host
+ community.docker.docker_host_info:
+ register: result
+
+- name: Get info on docker host and list images
+ community.docker.docker_host_info:
+ images: true
+ register: result
+
+- name: Get info on docker host and list images matching the filter
+ community.docker.docker_host_info:
+ images: true
+ images_filters:
+ label: "mylabel"
+ register: result
+
+- name: Get info on docker host and verbose list images
+ community.docker.docker_host_info:
+ images: true
+ verbose_output: true
+ register: result
+
+- name: Get info on docker host and used disk space
+ community.docker.docker_host_info:
+ disk_usage: true
+ register: result
+
+- name: Get info on docker host and list containers matching the filter
+ community.docker.docker_host_info:
+ containers: true
+ containers_filters:
+ label:
+ - key1=value1
+ - key2=value2
+ register: result
+
+- ansible.builtin.debug:
+ var: result.host_info
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+
+host_info:
+ description:
+ - Facts representing the basic state of the docker host. Matches the C(docker system info) output.
+ returned: always
+ type: dict
+volumes:
+ description:
+ - List of dict objects containing the basic information about each volume.
+ Keys matches the C(docker volume ls) output unless I(verbose_output=true).
+ See description for I(verbose_output).
+ returned: When I(volumes) is C(true)
+ type: list
+ elements: dict
+networks:
+ description:
+ - List of dict objects containing the basic information about each network.
+ Keys matches the C(docker network ls) output unless I(verbose_output=true).
+ See description for I(verbose_output).
+ returned: When I(networks) is C(true)
+ type: list
+ elements: dict
+containers:
+ description:
+ - List of dict objects containing the basic information about each container.
+ Keys matches the C(docker container ls) output unless I(verbose_output=true).
+ See description for I(verbose_output).
+ returned: When I(containers) is C(true)
+ type: list
+ elements: dict
+images:
+ description:
+ - List of dict objects containing the basic information about each image.
+ Keys matches the C(docker image ls) output unless I(verbose_output=true).
+ See description for I(verbose_output).
+ returned: When I(images) is C(true)
+ type: list
+ elements: dict
+disk_usage:
+ description:
+ - Information on summary disk usage by images, containers and volumes on docker host
+ unless I(verbose_output=true). See description for I(verbose_output).
+ returned: When I(disk_usage) is C(true)
+ type: dict
+
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ clean_dict_booleans_for_docker_api,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, APIError
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import convert_filters
+
+
+class DockerHostManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerHostManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['volumes', 'networks', 'containers', 'images']
+
+ self.results['host_info'] = self.get_docker_host_info()
+
+ if self.client.module.params['disk_usage']:
+ self.results['disk_usage'] = self.get_docker_disk_usage_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name), True)
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+
+ def get_docker_host_info(self):
+ try:
+ return self.client.info()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_disk_usage_facts(self):
+ try:
+ if self.verbose_output:
+ return self.client.df()
+ else:
+ return dict(LayersSize=self.client.df()['LayersSize'])
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
+ items = None
+ items_list = []
+
+ header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
+ header_volumes = ['Driver', 'Name']
+ header_images = ['Id', 'RepoTags', 'Created', 'Size']
+ header_networks = ['Id', 'Driver', 'Name', 'Scope']
+
+ filter_arg = dict()
+ if filters:
+ filter_arg['filters'] = filters
+ try:
+ if docker_object == 'containers':
+ params = {
+ 'limit': -1,
+ 'all': 1 if self.client.module.params['containers_all'] else 0,
+ 'size': 0,
+ 'trunc_cmd': 0,
+ 'filters': convert_filters(filters) if filters else None,
+ }
+ items = self.client.get_json("/containers/json", params=params)
+ elif docker_object == 'networks':
+ params = {
+ 'filters': convert_filters(filters or {})
+ }
+ items = self.client.get_json("/networks", params=params)
+ elif docker_object == 'images':
+ params = {
+ 'only_ids': 0,
+ 'all': 0,
+ 'filters': convert_filters(filters) if filters else None,
+ }
+ items = self.client.get_json("/images/json", params=params)
+ elif docker_object == 'volumes':
+ params = {
+ 'filters': convert_filters(filters) if filters else None,
+ }
+ items = self.client.get_json('/volumes', params=params)
+ items = items['Volumes']
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host for object '%s': %s" % (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ return items
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'containers':
+ for key in header_containers:
+ item_record[key] = item.get(key)
+ elif docker_object == 'networks':
+ for key in header_networks:
+ item_record[key] = item.get(key)
+ elif docker_object == 'images':
+ for key in header_images:
+ item_record[key] = item.get(key)
+ elif docker_object == 'volumes':
+ for key in header_volumes:
+ item_record[key] = item.get(key)
+ items_list.append(item_record)
+
+ return items_list
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_all=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ disk_usage=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ ),
+ )
+ client.fail_results['can_talk_to_docker'] = True
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerHostManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_image.py b/ansible_collections/community/docker/plugins/modules/docker_image.py
new file mode 100644
index 00000000..735de786
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_image.py
@@ -0,0 +1,1033 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image
+
+short_description: Manage docker images
+
+description:
+ - Build, load or pull an image, making the image available for creating containers. Also supports tagging
+ an image, pushing an image, and archiving an image to a C(.tar) file.
+
+notes:
+ - Building images is done using Docker daemon's API. It is not possible to use BuildKit / buildx this way.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - When trying to pull an image, the module assumes this is always changed in check mode.
+ diff_mode:
+ support: none
+
+options:
+ source:
+ description:
+ - "Determines where the module will try to retrieve the image from."
+ - "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
+ be specified when this value is used."
+ - "Use C(load) to load the image from a C(.tar) file. I(load_path) must
+ be specified when this value is used."
+ - "Use C(pull) to pull the image from a registry."
+ - "Use C(local) to make sure that the image is already available on the local
+ docker daemon. This means that the module does not try to build, pull or load the image."
+ type: str
+ choices:
+ - build
+ - load
+ - pull
+ - local
+ build:
+ description:
+ - "Specifies options used for building images."
+ type: dict
+ suboptions:
+ cache_from:
+ description:
+ - List of image names to consider as cache source.
+ type: list
+ elements: str
+ dockerfile:
+ description:
+ - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+ - This can also include a relative path (relative to I(path)).
+ type: str
+ http_timeout:
+ description:
+ - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+ seconds.
+ type: int
+ path:
+ description:
+ - Use with state 'present' to build an image. Will be the path to a directory containing the context and
+ Dockerfile for building an image.
+ type: path
+ required: true
+ pull:
+ description:
+ - When building an image downloads any updates to the FROM image in Dockerfile.
+ type: bool
+ default: false
+ rm:
+ description:
+ - Remove intermediate containers after build.
+ type: bool
+ default: true
+ network:
+ description:
+ - The network to use for C(RUN) build instructions.
+ type: str
+ nocache:
+ description:
+ - Do not use cache when building an image.
+ type: bool
+ default: false
+ etc_hosts:
+ description:
+ - Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
+ type: dict
+ args:
+ description:
+ - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
+ - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+ type: dict
+ container_limits:
+ description:
+ - A dictionary of limits applied to each container created by the build process.
+ type: dict
+ suboptions:
+ memory:
+ description:
+ - Set memory limit for build.
+ type: int
+ memswap:
+ description:
+ - Total memory (memory + swap).
+ - Use C(-1) to disable swap.
+ type: int
+ cpushares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ cpusetcpus:
+ description:
+ - CPUs in which to allow execution.
+ - For example, C(0-3) or C(0,1).
+ type: str
+ use_config_proxy:
+ description:
+ - If set to C(true) and a proxy configuration is specified in the docker client configuration
+ (by default C($HOME/.docker/config.json)), the corresponding environment variables will
+ be set in the container being built.
+ type: bool
+ target:
+ description:
+ - When building an image specifies an intermediate build stage by
+ name as a final stage for the resulting image.
+ type: str
+ platform:
+ description:
+ - Platform in the format C(os[/arch[/variant]]).
+ type: str
+ version_added: 1.1.0
+ archive_path:
+ description:
+ - Use with state C(present) to archive an image to a .tar file.
+ type: path
+ load_path:
+ description:
+ - Use with state C(present) to load an image from a .tar file.
+ - Set I(source) to C(load) if you want to load the image.
+ type: path
+ force_source:
+ description:
+ - Use with state C(present) to build, load or pull an image (depending on the
+ value of the I(source) option) when the image already exists.
+ type: bool
+ default: false
+ force_absent:
+ description:
+ - Use with state I(absent) to un-tag and remove all images matching the specified name.
+ type: bool
+ default: false
+ force_tag:
+ description:
+ - Use with state C(present) to force tagging an image.
+ type: bool
+ default: false
+ name:
+ description:
+ - "Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name).
+ When pushing or pulling an image the name can optionally include the tag by appending C(:tag_name)."
+ - Note that image IDs (hashes) are only supported for I(state=absent), for I(state=present) with I(source=load),
+ and for I(state=present) with I(source=local).
+ type: str
+ required: true
+ pull:
+ description:
+ - "Specifies options used for pulling images."
+ type: dict
+ version_added: 1.3.0
+ suboptions:
+ platform:
+ description:
+ - When pulling an image, ask for this specific platform.
+ - Note that this value is not used to determine whether the image needs to be pulled. This might change
+ in the future in a minor release, though.
+ type: str
+ push:
+ description:
+ - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
+ type: bool
+ default: false
+ repository:
+ description:
+ - Use with state C(present) to tag the image.
+ - Expects format C(repository:tag). If no tag is provided, will use the value of the I(tag) parameter or C(latest).
+ - If I(push=true), I(repository) must either include a registry, or will be assumed to belong to the default
+ registry (Docker Hub).
+ type: str
+ state:
+ description:
+ - Make assertions about the state of an image.
+ - When C(absent) an image will be removed. Use the force option to un-tag and remove all images
+ matching the provided name.
+ - When C(present) check if an image exists using the provided name and tag. If the image is not found or the
+ force option is used, the image will either be pulled, built or loaded, depending on the I(source) option.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ tag:
+ description:
+ - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
+ I(latest).
+ - If I(name) parameter format is I(name:tag), then tag value from I(name) will take precedence.
+ type: str
+ default: latest
+
+requirements:
+ - "Docker API >= 1.25"
+
+author:
+ - Pavel Antonov (@softzilla)
+ - Chris Houseknecht (@chouseknecht)
+ - Sorin Sbarnea (@ssbarnea)
+
+'''
+
+EXAMPLES = '''
+
+- name: Pull an image
+ community.docker.docker_image:
+ name: pacur/centos-7
+ source: pull
+ # Select platform for pulling. If not specified, will pull whatever docker prefers.
+ pull:
+ platform: amd64
+
+- name: Tag and push to docker hub
+ community.docker.docker_image:
+ name: pacur/centos-7:56
+ repository: dcoppenhagan/myimage:7.56
+ push: true
+ source: local
+
+- name: Tag and push to local registry
+ community.docker.docker_image:
+ # Image will be centos:7
+ name: centos
+ # Will be pushed to localhost:5000/centos:7
+ repository: localhost:5000/centos
+ tag: 7
+ push: true
+ source: local
+
+- name: Add tag latest to image
+ community.docker.docker_image:
+ name: myimage:7.1.2
+ repository: myimage:latest
+ # As 'latest' usually already is present, we need to enable overwriting of existing tags:
+ force_tag: true
+ source: local
+
+- name: Remove image
+ community.docker.docker_image:
+ state: absent
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+
+- name: Build an image and push it to a private repo
+ community.docker.docker_image:
+ build:
+ path: ./sinatra
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ push: true
+ source: build
+
+- name: Archive image
+ community.docker.docker_image:
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ archive_path: my_sinatra.tar
+ source: local
+
+- name: Load image from archive and push to a private registry
+ community.docker.docker_image:
+ name: localhost:5000/myimages/sinatra
+ tag: v1
+ push: true
+ load_path: my_sinatra.tar
+ source: load
+
+- name: Build an image with build args
+ community.docker.docker_image:
+ name: myimage
+ build:
+ path: /path/to/build/dir
+ args:
+ log_volume: /var/log/myapp
+ listen_port: 8080
+ source: build
+
+- name: Build image using cache source
+ community.docker.docker_image:
+ name: myimage:latest
+ build:
+ path: /path/to/build/dir
+ # Use as cache source for building myimage
+ cache_from:
+ - nginx:latest
+ - alpine:3.8
+ source: build
+'''
+
+RETURN = '''
+image:
+ description: Image inspection results for the affected image.
+ returned: success
+ type: dict
+ sample: {}
+stdout:
+ description: Docker build output when building an image.
+ returned: success
+ type: str
+ sample: ""
+ version_added: 1.0.0
+'''
+
+import errno
+import json
+import os
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.image_archive import (
+ archived_image_manifest,
+ api_image_id,
+ ImageArchiveInvalidException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ clean_dict_booleans_for_docker_api,
+ DockerBaseClass,
+ is_image_name_id,
+ is_valid_tag,
+)
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils._api.auth import (
+ get_config_header,
+ resolve_repository_name,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.constants import (
+ DEFAULT_DATA_CHUNK_SIZE,
+ CONTAINER_LIMITS_KEYS,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, NotFound
+from ansible_collections.community.docker.plugins.module_utils._api.utils.build import (
+ process_dockerfile,
+ tar,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
+ format_extra_hosts,
+ parse_repository_tag,
+)
+
+
+class ImageManager(DockerBaseClass):
+    # Implements the docker_image module: building, loading, pulling, tagging,
+    # pushing, archiving and removing images. All work is driven from __init__.
+
+    def __init__(self, client, results):
+        '''
+        Configure a docker_image task.
+
+        :param client: Ansible Docker Client wrapper over Docker client
+        :type client: AnsibleDockerClient
+        :param results: This task adds its output values to this dictionary
+        :type results: dict
+        '''
+
+        super(ImageManager, self).__init__()
+
+        self.client = client
+        self.results = results
+        parameters = self.client.module.params
+        self.check_mode = self.client.check_mode
+
+        self.source = parameters['source']
+        # 'build' and 'pull' are optional dicts of suboptions; substitute empty
+        # dicts so the .get() lookups below are safe when they were omitted.
+        build = parameters['build'] or dict()
+        pull = parameters['pull'] or dict()
+        self.archive_path = parameters['archive_path']
+        self.cache_from = build.get('cache_from')
+        self.container_limits = build.get('container_limits')
+        self.dockerfile = build.get('dockerfile')
+        self.force_source = parameters['force_source']
+        self.force_absent = parameters['force_absent']
+        self.force_tag = parameters['force_tag']
+        self.load_path = parameters['load_path']
+        self.name = parameters['name']
+        self.network = build.get('network')
+        self.extra_hosts = clean_dict_booleans_for_docker_api(build.get('etc_hosts'))
+        self.nocache = build.get('nocache', False)
+        self.build_path = build.get('path')
+        self.pull = build.get('pull')
+        self.target = build.get('target')
+        self.repository = parameters['repository']
+        self.rm = build.get('rm', True)
+        self.state = parameters['state']
+        self.tag = parameters['tag']
+        self.http_timeout = build.get('http_timeout')
+        self.pull_platform = pull.get('platform')
+        self.push = parameters['push']
+        self.buildargs = build.get('args')
+        self.build_platform = build.get('platform')
+        self.use_config_proxy = build.get('use_config_proxy')
+
+        # If name contains a tag, it takes precedence over tag parameter.
+        if not is_image_name_id(self.name):
+            repo, repo_tag = parse_repository_tag(self.name)
+            if repo_tag:
+                self.name = repo
+                self.tag = repo_tag
+
+        # Sanity check: fail early when we know that something will fail later
+        if self.repository and is_image_name_id(self.repository):
+            self.fail("`repository` must not be an image ID; got: %s" % self.repository)
+        if not self.repository and self.push and is_image_name_id(self.name):
+            self.fail("Cannot push an image by ID; specify `repository` to tag and push the image with ID %s instead" % self.name)
+
+        # Constructing the manager performs the requested action immediately.
+        if self.state == 'present':
+            self.present()
+        elif self.state == 'absent':
+            self.absent()
+
+    def fail(self, msg):
+        # Convenience wrapper: delegate failure reporting to the client.
+        self.client.fail(msg)
+
+    def present(self):
+        '''
+        Handles state = 'present', which includes building, loading or pulling an image,
+        depending on user provided parameters.
+
+        :returns None
+        '''
+        # Look up the image locally first; for IDs a missing image is tolerated.
+        if is_image_name_id(self.name):
+            image = self.client.find_image_by_id(self.name, accept_missing_image=True)
+        else:
+            image = self.client.find_image(name=self.name, tag=self.tag)
+
+        if not image or self.force_source:
+            if self.source == 'build':
+                if is_image_name_id(self.name):
+                    self.fail("Image name must not be an image ID for source=build; got: %s" % self.name)
+
+                # Build the image
+                if not os.path.isdir(self.build_path):
+                    self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
+                image_name = self.name
+                if self.tag:
+                    image_name = "%s:%s" % (self.name, self.tag)
+                self.log("Building image %s" % image_name)
+                self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
+                self.results['changed'] = True
+                if not self.check_mode:
+                    self.results.update(self.build_image())
+
+            elif self.source == 'load':
+                # Load the image from an archive
+                if not os.path.isfile(self.load_path):
+                    self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
+                                                                                            self.load_path))
+                image_name = self.name
+                if self.tag and not is_image_name_id(image_name):
+                    image_name = "%s:%s" % (self.name, self.tag)
+                self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
+                self.results['changed'] = True
+                if not self.check_mode:
+                    self.results['image'] = self.load_image()
+            elif self.source == 'pull':
+                if is_image_name_id(self.name):
+                    self.fail("Image name must not be an image ID for source=pull; got: %s" % self.name)
+
+                # pull the image
+                self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
+                self.results['changed'] = True
+                if not self.check_mode:
+                    self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag, platform=self.pull_platform)
+            elif self.source == 'local':
+                if image is None:
+                    name = self.name
+                    if self.tag and not is_image_name_id(name):
+                        name = "%s:%s" % (self.name, self.tag)
+                    self.client.fail('Cannot find the image %s locally.' % name)
+            # NOTE(review): this lookup assumes one of the branches above stored
+            # an image dict in self.results['image']; for source=local with
+            # force_source it would hit the initial empty dict from main() --
+            # verify against callers/tests before relying on this path.
+            if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
+                self.results['changed'] = False
+        else:
+            # Image already present and no forced re-source: just report it.
+            self.results['image'] = image
+
+        if self.archive_path:
+            self.archive_image(self.name, self.tag)
+
+        # Push directly, or tag into `repository` (optionally pushing that tag).
+        if self.push and not self.repository:
+            self.push_image(self.name, self.tag)
+        elif self.repository:
+            self.tag_image(self.name, self.tag, self.repository, push=self.push)
+
+    def absent(self):
+        '''
+        Handles state = 'absent', which removes an image.
+
+        :returns: None
+        '''
+        name = self.name
+        if is_image_name_id(name):
+            image = self.client.find_image_by_id(name, accept_missing_image=True)
+        else:
+            image = self.client.find_image(name, self.tag)
+            if self.tag:
+                name = "%s:%s" % (self.name, self.tag)
+        if image:
+            if not self.check_mode:
+                try:
+                    # force=True (force_absent) also removes images that are
+                    # referenced by multiple tags.
+                    self.client.delete_json('/images/{0}', name, params={'force': self.force_absent})
+                except NotFound:
+                    # If the image vanished while we were trying to remove it, don't fail
+                    pass
+                except Exception as exc:
+                    self.fail("Error removing image %s - %s" % (name, to_native(exc)))
+
+            self.results['changed'] = True
+            self.results['actions'].append("Removed image %s" % (name))
+            self.results['image']['state'] = 'Deleted'
+
+ @staticmethod
+ def archived_image_action(failure_logger, archive_path, current_image_name, current_image_id):
+ '''
+ If the archive is missing or requires replacement, return an action message.
+
+ :param failure_logger: a logging function that accepts one parameter of type str
+ :type failure_logger: Callable
+ :param archive_path: Filename to write archive to
+ :type archive_path: str
+ :param current_image_name: repo:tag
+ :type current_image_name: str
+ :param current_image_id: Hash, including hash type prefix such as "sha256:"
+ :type current_image_id: str
+
+ :returns: Either None, or an Ansible action message.
+ :rtype: str
+ '''
+
+ def build_msg(reason):
+ return 'Archived image %s to %s, %s' % (current_image_name, archive_path, reason)
+
+ try:
+ archived = archived_image_manifest(archive_path)
+ except ImageArchiveInvalidException as exc:
+ failure_logger('Unable to extract manifest summary from archive: %s' % to_native(exc))
+ return build_msg('overwriting an unreadable archive file')
+
+ if archived is None:
+ return build_msg('since none present')
+ elif current_image_id == api_image_id(archived.image_id) and [current_image_name] == archived.repo_tags:
+ return None
+ else:
+ name = ', '.join(archived.repo_tags)
+
+ return build_msg('overwriting archive with image %s named %s' % (archived.image_id, name))
+
+    def archive_image(self, name, tag):
+        '''
+        Archive an image to a .tar file. Called when archive_path is passed.
+
+        :param name: Name/repository of the image
+        :type name: str
+        :param tag: Optional image tag; assumed to be "latest" if None
+        :type tag: str | None
+        '''
+
+        if not tag:
+            tag = "latest"
+
+        if is_image_name_id(name):
+            image = self.client.find_image_by_id(name, accept_missing_image=True)
+            image_name = name
+        else:
+            image = self.client.find_image(name=name, tag=tag)
+            image_name = "%s:%s" % (name, tag)
+
+        if not image:
+            self.log("archive image: image %s not found" % image_name)
+            return
+
+        # Will have a 'sha256:' prefix
+        image_id = image['Id']
+
+        # Only (re)write the archive when it is absent, unreadable, or holds a
+        # different image; archived_image_action returns None when up to date.
+        action = self.archived_image_action(self.client.module.debug, self.archive_path, image_name, image_id)
+
+        if action:
+            self.results['actions'].append(action)
+
+        self.results['changed'] = action is not None
+
+        if (not self.check_mode) and self.results['changed']:
+            self.log("Getting archive of image %s" % image_name)
+            try:
+                # Stream the image tarball from GET /images/{name}/get in chunks.
+                saved_image = self.client._stream_raw_result(
+                    self.client._get(self.client._url('/images/{0}/get', image_name), stream=True),
+                    DEFAULT_DATA_CHUNK_SIZE,
+                    False,
+                )
+            except Exception as exc:
+                self.fail("Error getting image %s - %s" % (image_name, to_native(exc)))
+
+            try:
+                with open(self.archive_path, 'wb') as fd:
+                    for chunk in saved_image:
+                        fd.write(chunk)
+            except Exception as exc:
+                self.fail("Error writing image archive %s - %s" % (self.archive_path, to_native(exc)))
+
+        self.results['image'] = image
+
+    def push_image(self, name, tag=None):
+        '''
+        If the name of the image contains a repository path, then push the image.
+
+        :param name: Name of the image to push.
+        :param tag: Use a specific tag.
+        :return: None
+        '''
+
+        if is_image_name_id(name):
+            self.fail("Cannot push an image ID: %s" % name)
+
+        repository = name
+        if not tag:
+            repository, tag = parse_repository_tag(name)
+        registry, repo_name = resolve_repository_name(repository)
+
+        self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+
+        if registry:
+            self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+            self.results['changed'] = True
+            if not self.check_mode:
+                status = None
+                try:
+                    changed = False
+
+                    push_repository, push_tag = repository, tag
+                    if not push_tag:
+                        push_repository, push_tag = parse_repository_tag(push_repository)
+                    push_registry, dummy = resolve_repository_name(push_repository)
+                    # Attach registry credentials (if configured) for the push.
+                    headers = {}
+                    header = get_config_header(self.client, push_registry)
+                    if header:
+                        headers['X-Registry-Auth'] = header
+                    response = self.client._post_json(
+                        self.client._url("/images/{0}/push", push_repository),
+                        data=None,
+                        headers=headers,
+                        stream=True,
+                        params={'tag': push_tag},
+                    )
+                    self.client._raise_for_status(response)
+                    # Only report 'changed' if the daemon actually uploaded
+                    # layers (a 'Pushing' status line appeared in the stream).
+                    for line in self.client._stream_helper(response, decode=True):
+                        self.log(line, pretty_print=True)
+                        if line.get('errorDetail'):
+                            raise Exception(line['errorDetail']['message'])
+                        status = line.get('status')
+                        if status == 'Pushing':
+                            changed = True
+                    self.results['changed'] = changed
+                except Exception as exc:
+                    # Give targeted hints for the common auth failure modes.
+                    if 'unauthorized' in str(exc):
+                        if 'authentication required' in str(exc):
+                            self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
+                                      (registry, repo_name, tag, to_native(exc), registry))
+                        else:
+                            self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
+                                      (registry, repo_name, tag, str(exc)))
+                    self.fail("Error pushing image %s: %s" % (repository, to_native(exc)))
+                self.results['image'] = self.client.find_image(name=repository, tag=tag)
+                if not self.results['image']:
+                    self.results['image'] = dict()
+                self.results['image']['push_status'] = status
+
+    def tag_image(self, name, tag, repository, push=False):
+        '''
+        Tag an image into a repository.
+
+        :param name: name of the image. required.
+        :param tag: image tag.
+        :param repository: path to the repository. required.
+        :param push: bool. push the image once it's tagged.
+        :return: None
+        '''
+        # A tag embedded in `repository` is the default; an explicit `tag`
+        # parameter overrides it, falling back to "latest".
+        repo, repo_tag = parse_repository_tag(repository)
+        if not repo_tag:
+            repo_tag = "latest"
+            if tag:
+                repo_tag = tag
+        image = self.client.find_image(name=repo, tag=repo_tag)
+        found = 'found' if image else 'not found'
+        self.log("image %s was %s" % (repo, found))
+
+        if not image or self.force_tag:
+            image_name = name
+            if not is_image_name_id(name) and tag and not name.endswith(':' + tag):
+                image_name = "%s:%s" % (name, tag)
+            self.log("tagging %s to %s:%s" % (image_name, repo, repo_tag))
+            self.results['changed'] = True
+            self.results['actions'].append("Tagged image %s to %s:%s" % (image_name, repo, repo_tag))
+            if not self.check_mode:
+                try:
+                    # Finding the image does not always work, especially running a localhost registry. In those
+                    # cases, if we don't set force=True, it errors.
+                    params = {
+                        'tag': repo_tag,
+                        'repo': repo,
+                        'force': True,
+                    }
+                    res = self.client._post(self.client._url('/images/{0}/tag', image_name), params=params)
+                    self.client._raise_for_status(res)
+                    if res.status_code != 201:
+                        raise Exception("Tag operation failed.")
+                except Exception as exc:
+                    self.fail("Error: failed to tag image - %s" % to_native(exc))
+                self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
+                # If the tag already pointed at this exact image, nothing changed.
+                if image and image['Id'] == self.results['image']['Id']:
+                    self.results['changed'] = False
+
+        if push:
+            self.push_image(repo, repo_tag)
+
+ @staticmethod
+ def _extract_output_line(line, output):
+ '''
+ Extract text line from stream output and, if found, adds it to output.
+ '''
+ if 'stream' in line or 'status' in line:
+ # Make sure we have a string (assuming that line['stream'] and
+ # line['status'] are either not defined, falsish, or a string)
+ text_line = line.get('stream') or line.get('status') or ''
+ output.extend(text_line.splitlines())
+
+    def build_image(self):
+        '''
+        Build an image
+
+        :return: image dict
+        '''
+        # Either `remote` (a URL the daemon fetches) or `context` (a local tar
+        # stream) is sent to POST /build, never both.
+        remote = context = None
+        headers = {}
+        buildargs = {}
+        if self.buildargs:
+            # Docker expects string values; coerce everything for convenience.
+            for key, value in self.buildargs.items():
+                buildargs[key] = to_native(value)
+
+        container_limits = self.container_limits or {}
+        for key in container_limits.keys():
+            if key not in CONTAINER_LIMITS_KEYS:
+                raise DockerException('Invalid container_limits key {key}'.format(key=key))
+
+        dockerfile = self.dockerfile
+        if self.build_path.startswith(('http://', 'https://', 'git://', 'github.com/', 'git@')):
+            remote = self.build_path
+        elif not os.path.isdir(self.build_path):
+            raise TypeError("You must specify a directory to build in path")
+        else:
+            # Honor .dockerignore when creating the local build context tarball.
+            dockerignore = os.path.join(self.build_path, '.dockerignore')
+            exclude = None
+            if os.path.exists(dockerignore):
+                with open(dockerignore) as f:
+                    exclude = list(filter(
+                        lambda x: x != '' and x[0] != '#',
+                        [line.strip() for line in f.read().splitlines()]
+                    ))
+            dockerfile = process_dockerfile(dockerfile, self.build_path)
+            context = tar(self.build_path, exclude=exclude, dockerfile=dockerfile, gzip=False)
+
+        # Query parameters for POST /build; see the Docker Engine API reference.
+        params = {
+            't': "%s:%s" % (self.name, self.tag) if self.tag else self.name,
+            'remote': remote,
+            'q': False,
+            'nocache': self.nocache,
+            'rm': self.rm,
+            'forcerm': self.rm,
+            'pull': self.pull,
+            'dockerfile': dockerfile,
+        }
+        params.update(container_limits)
+
+        if self.use_config_proxy:
+            # Proxy settings from the client config become build args, without
+            # overriding explicitly provided ones.
+            proxy_args = self.client._proxy_configs.get_environment()
+            for k, v in proxy_args.items():
+                buildargs.setdefault(k, v)
+        if buildargs:
+            params.update({'buildargs': json.dumps(buildargs)})
+
+        if self.cache_from:
+            params.update({'cachefrom': json.dumps(self.cache_from)})
+
+        if self.target:
+            params.update({'target': self.target})
+
+        if self.network:
+            params.update({'networkmode': self.network})
+
+        if self.extra_hosts is not None:
+            params.update({'extrahosts': format_extra_hosts(self.extra_hosts)})
+
+        if self.build_platform is not None:
+            params['platform'] = self.build_platform
+
+        if context is not None:
+            headers['Content-Type'] = 'application/tar'
+
+        self.client._set_auth_headers(headers)
+
+        response = self.client._post(
+            self.client._url('/build'),
+            data=context,
+            params=params,
+            headers=headers,
+            stream=True,
+            timeout=self.http_timeout,
+        )
+
+        if context is not None:
+            context.close()
+
+        # Collect the streamed build log; fail on the first error event.
+        build_output = []
+        for line in self.client._stream_helper(response, decode=True):
+            self.log(line, pretty_print=True)
+            self._extract_output_line(line, build_output)
+
+            if line.get('error'):
+                if line.get('errorDetail'):
+                    errorDetail = line.get('errorDetail')
+                    self.fail(
+                        "Error building %s - code: %s, message: %s, logs: %s" % (
+                            self.name,
+                            errorDetail.get('code'),
+                            errorDetail.get('message'),
+                            build_output))
+                else:
+                    self.fail("Error building %s - message: %s, logs: %s" % (
+                        self.name, line.get('error'), build_output))
+
+        return {
+            "stdout": "\n".join(build_output),
+            "image": self.client.find_image(name=self.name, tag=self.tag),
+        }
+
+ def load_image(self):
+ '''
+ Load an image from a .tar archive
+
+ :return: image dict
+ '''
+ # Load image(s) from file
+ load_output = []
+ has_output = False
+ try:
+ self.log("Opening image %s" % self.load_path)
+ with open(self.load_path, 'rb') as image_tar:
+ self.log("Loading image from %s" % self.load_path)
+ res = self.client._post(self.client._url("/images/load"), data=image_tar, stream=True)
+ if LooseVersion(self.client.api_version) >= LooseVersion('1.23'):
+ has_output = True
+ for line in self.client._stream_helper(res, decode=True):
+ self.log(line, pretty_print=True)
+ self._extract_output_line(line, load_output)
+ else:
+ self.client._raise_for_status(res)
+ self.client.module.warn(
+ 'The API version of your Docker daemon is < 1.23, which does not return the image'
+ ' loading result from the Docker daemon. Therefore, we cannot verify whether the'
+ ' expected image was loaded, whether multiple images where loaded, or whether the load'
+ ' actually succeeded. You should consider upgrading your Docker daemon.'
+ )
+ except EnvironmentError as exc:
+ if exc.errno == errno.ENOENT:
+ self.client.fail("Error opening image %s - %s" % (self.load_path, to_native(exc)))
+ self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output))
+ except Exception as exc:
+ self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output))
+
+ # Collect loaded images
+ if has_output:
+ # We can only do this when we actually got some output from Docker daemon
+ loaded_images = set()
+ loaded_image_ids = set()
+ for line in load_output:
+ if line.startswith('Loaded image:'):
+ loaded_images.add(line[len('Loaded image:'):].strip())
+ if line.startswith('Loaded image ID:'):
+ loaded_image_ids.add(line[len('Loaded image ID:'):].strip().lower())
+
+ if not loaded_images and not loaded_image_ids:
+ self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
+
+ if is_image_name_id(self.name):
+ expected_image = self.name.lower()
+ found_image = expected_image not in loaded_image_ids
+ else:
+ expected_image = '%s:%s' % (self.name, self.tag)
+ found_image = expected_image not in loaded_images
+ if found_image:
+ self.client.fail(
+ "The archive did not contain image '%s'. Instead, found %s." % (
+ expected_image,
+ ', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids)))),
+ stdout='\n'.join(load_output))
+ loaded_images.remove(expected_image)
+
+ if loaded_images:
+ self.client.module.warn(
+ "The archive contained more images than specified: %s" % (
+ ', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids))), ))
+
+ if is_image_name_id(self.name):
+ return self.client.find_image_by_id(self.name, accept_missing_image=True)
+ else:
+ return self.client.find_image(self.name, self.tag)
+
+
+def main():
+    # Argument spec mirrors the module DOCUMENTATION; 'build' and 'pull' are
+    # nested option dicts with their own suboption specs.
+    argument_spec = dict(
+        source=dict(type='str', choices=['build', 'load', 'pull', 'local']),
+        build=dict(type='dict', options=dict(
+            cache_from=dict(type='list', elements='str'),
+            container_limits=dict(type='dict', options=dict(
+                memory=dict(type='int'),
+                memswap=dict(type='int'),
+                cpushares=dict(type='int'),
+                cpusetcpus=dict(type='str'),
+            )),
+            dockerfile=dict(type='str'),
+            http_timeout=dict(type='int'),
+            network=dict(type='str'),
+            nocache=dict(type='bool', default=False),
+            path=dict(type='path', required=True),
+            pull=dict(type='bool', default=False),
+            rm=dict(type='bool', default=True),
+            args=dict(type='dict'),
+            use_config_proxy=dict(type='bool'),
+            target=dict(type='str'),
+            etc_hosts=dict(type='dict'),
+            platform=dict(type='str'),
+        )),
+        archive_path=dict(type='path'),
+        force_source=dict(type='bool', default=False),
+        force_absent=dict(type='bool', default=False),
+        force_tag=dict(type='bool', default=False),
+        load_path=dict(type='path'),
+        name=dict(type='str', required=True),
+        pull=dict(type='dict', options=dict(
+            platform=dict(type='str'),
+        )),
+        push=dict(type='bool', default=False),
+        repository=dict(type='str'),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        tag=dict(type='str', default='latest'),
+    )
+
+    # Cross-option requirements: present needs a source; build/load sources
+    # need their respective inputs.
+    required_if = [
+        ('state', 'present', ['source']),
+        ('source', 'build', ['build']),
+        ('source', 'load', ['load_path']),
+    ]
+
+    # Usage detectors for options that need a minimum Docker API version;
+    # only trigger the version check when the option is actually used.
+    def detect_etc_hosts(client):
+        return client.module.params['build'] and bool(client.module.params['build'].get('etc_hosts'))
+
+    def detect_build_platform(client):
+        return client.module.params['build'] and client.module.params['build'].get('platform') is not None
+
+    def detect_pull_platform(client):
+        return client.module.params['pull'] and client.module.params['pull'].get('platform') is not None
+
+    option_minimal_versions = dict()
+    option_minimal_versions["build.etc_hosts"] = dict(docker_api_version='1.27', detect_usage=detect_etc_hosts)
+    option_minimal_versions["build.platform"] = dict(docker_api_version='1.32', detect_usage=detect_build_platform)
+    option_minimal_versions["pull.platform"] = dict(docker_api_version='1.32', detect_usage=detect_pull_platform)
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        required_if=required_if,
+        supports_check_mode=True,
+        option_minimal_versions=option_minimal_versions,
+    )
+
+    # Validate tag syntax up front; empty is allowed (name may carry the tag).
+    if not is_valid_tag(client.module.params['tag'], allow_empty=True):
+        client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag']))
+
+    # build.path is required inside the 'build' dict, but required_if cannot
+    # express nested requirements, so check it manually.
+    if client.module.params['source'] == 'build':
+        if not client.module.params['build'] or not client.module.params['build'].get('path'):
+            client.fail('If "source" is set to "build", the "build.path" option must be specified.')
+
+    try:
+        results = dict(
+            changed=False,
+            actions=[],
+            image={}
+        )
+
+        # The manager performs the requested action and fills `results`.
+        ImageManager(client, results)
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail(
+            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+            exception=traceback.format_exc())
diff --git a/ansible_collections/community/docker/plugins/modules/docker_image_info.py b/ansible_collections/community/docker/plugins/modules/docker_image_info.py
new file mode 100644
index 00000000..e4f480b1
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_image_info.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_info
+
+short_description: Inspect docker images
+
+description:
+ - Provide one or more image names, and the module will inspect each, returning an array of inspection results.
+ - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
+ locally, you can call the module with the image name, then check whether the result list is empty (image does not
+ exist) or has one element (the image exists locally).
+ - The module will not attempt to pull images from registries. Use M(community.docker.docker_image) with I(source) set to C(pull)
+ to ensure an image is pulled.
+
+notes:
+ - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
+ where C(tag) is optional. If a tag is not provided, C(latest) will be used. Instead of image names, also
+ image IDs can be used.
+ - If no name is provided, a list of all images will be returned.
+ type: list
+ elements: str
+
+requirements:
+ - "Docker API >= 1.25"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+
+'''
+
+EXAMPLES = '''
+- name: Inspect a single image
+ community.docker.docker_image_info:
+ name: pacur/centos-7
+
+- name: Inspect multiple images
+ community.docker.docker_image_info:
+ name:
+ - pacur/centos-7
+ - sinatra
+ register: result
+
+- name: Make sure that both images pacur/centos-7 and sinatra exist locally
+ ansible.builtin.assert:
+ that:
+ - result.images | length == 2
+'''
+
+RETURN = '''
+images:
+ description:
+ - Inspection results for the selected images.
+ - The list only contains inspection results of images existing locally.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "Architecture": "amd64",
+ "Author": "",
+ "Comment": "",
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/etc/docker/registry/config.yml"
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
+ "ContainerConfig": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ '#(nop) CMD ["/etc/docker/registry/config.yml"]'
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Created": "2016-03-08T21:08:15.399680378Z",
+ "DockerVersion": "1.9.1",
+ "GraphDriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
+ "Name": "registry:2",
+ "Os": "linux",
+ "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
+ "RepoDigests": [],
+ "RepoTags": [
+ "registry:2"
+ ],
+ "Size": 0,
+ "VirtualSize": 165808884
+ }
+ ]
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ is_image_name_id,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, NotFound
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag
+
+
class ImageManager(DockerBaseClass):
    '''
    Collects inspection data for Docker images and stores it in the
    module's result dict.

    On construction, fills ``results['images']`` with inspection results:
    either for the image names given in the module's ``name`` parameter,
    or for all images known to the daemon when no name was provided.
    '''

    def __init__(self, client, results):
        '''
        :param client: AnsibleDockerClient connected to the Docker daemon
        :param results: result dict to populate (mutated in place)
        '''
        super(ImageManager, self).__init__()

        self.client = client
        self.results = results
        self.name = self.client.module.params.get('name')
        self.log("Gathering facts for images: %s" % (str(self.name)))

        if self.name:
            self.results['images'] = self.get_facts()
        else:
            self.results['images'] = self.get_all_images()

    def fail(self, msg):
        # Delegate failure handling to the client (ends module execution).
        self.client.fail(msg)

    def get_facts(self):
        '''
        Lookup and inspect each image name found in the names parameter.

        Names that do not resolve to a local image are silently omitted
        from the result.

        :returns array of image dictionaries
        '''

        results = []

        names = self.name
        if not isinstance(names, list):
            names = [names]

        for name in names:
            if is_image_name_id(name):
                self.log('Fetching image %s (ID)' % (name))
                image = self.client.find_image_by_id(name, accept_missing_image=True)
            else:
                repository, tag = parse_repository_tag(name)
                if not tag:
                    # Match Docker CLI behavior: an untagged reference means ':latest'.
                    tag = 'latest'
                self.log('Fetching image %s:%s' % (repository, tag))
                image = self.client.find_image(name=repository, tag=tag)
            if image:
                results.append(image)
        return results

    def get_all_images(self):
        '''
        Inspect every image currently known to the Docker daemon.

        :returns array of image dictionaries
        '''
        results = []
        params = {
            'only_ids': 0,
            'all': 0,
        }
        images = self.client.get_json("/images/json", params=params)
        for image in images:
            try:
                inspection = self.client.get_json('/images/{0}/json', image['Id'])
            except NotFound:
                # The image was removed between listing and inspection. Skip it
                # instead of appending None: the documented return value is a
                # list of dicts describing existing images.
                continue
            except Exception as exc:
                self.fail("Error inspecting image %s - %s" % (image['Id'], to_native(exc)))
            results.append(inspection)
        return results
+
+
def main():
    '''Entry point: gather image inspection facts and exit the module.'''
    client = AnsibleDockerClient(
        argument_spec=dict(
            name=dict(type='list', elements='str'),
        ),
        supports_check_mode=True,
    )

    try:
        results = {
            'changed': False,
            'images': [],
        }
        # ImageManager populates results['images'] as a side effect.
        ImageManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail(
            'An unexpected Docker error occurred: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_image_load.py b/ansible_collections/community/docker/plugins/modules/docker_image_load.py
new file mode 100644
index 00000000..880ae4e4
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_image_load.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_load
+
+short_description: Load docker image(s) from archives
+
+version_added: 1.3.0
+
+description:
+ - Load one or multiple Docker images from a C(.tar) archive, and return information on
+ the loaded image(s).
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ path:
+ description:
+ - The path to the C(.tar) archive to load Docker image(s) from.
+ type: path
+ required: true
+
+notes:
+ - Does not support C(check_mode).
+
+requirements:
+ - "Docker API >= 1.25"
+
+author:
+ - Felix Fontein (@felixfontein)
+'''
+
+EXAMPLES = '''
+- name: Load all image(s) from the given tar file
+ community.docker.docker_image_load:
+ path: /path/to/images.tar
+ register: result
+
+- name: Print the loaded image names
+ ansible.builtin.debug:
+ msg: "Loaded the following images: {{ result.image_names | join(', ') }}"
+'''
+
+RETURN = '''
+image_names:
+ description: List of image names and IDs loaded from the archive.
+ returned: success
+ type: list
+ elements: str
+ sample:
+ - 'hello-world:latest'
+ - 'sha256:e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9'
+images:
+ description: Image inspection results for the loaded images.
+ returned: success
+ type: list
+ elements: dict
+ sample: []
+'''
+
+import errno
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ is_image_name_id,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+
+
class ImageManager(DockerBaseClass):
    '''
    Loads Docker images from a .tar archive and records the loaded image
    names, their inspection data and the daemon's output in the module's
    result dict.
    '''

    def __init__(self, client, results):
        # :param client: AnsibleDockerClient connected to the Docker daemon
        # :param results: result dict to populate (mutated in place)
        super(ImageManager, self).__init__()

        self.client = client
        self.results = results
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode

        self.path = parameters['path']

        # All work happens at construction time.
        self.load_images()

    @staticmethod
    def _extract_output_line(line, output):
        '''
        Extract the text from a decoded JSON stream message and, if present,
        append its lines to ``output``.
        '''
        if 'stream' in line or 'status' in line:
            # Make sure we have a string (assuming that line['stream'] and
            # line['status'] are either not defined, falsish, or a string)
            text_line = line.get('stream') or line.get('status') or ''
            output.extend(text_line.splitlines())

    def load_images(self):
        '''
        Load images from a .tar archive
        '''
        # Load image(s) from file
        load_output = []
        try:
            self.log("Opening image {0}".format(self.path))
            with open(self.path, 'rb') as image_tar:
                self.log("Loading images from {0}".format(self.path))
                # Stream the archive to the daemon's /images/load endpoint and
                # collect the progress messages as they arrive.
                res = self.client._post(self.client._url("/images/load"), data=image_tar, stream=True)
                for line in self.client._stream_helper(res, decode=True):
                    self.log(line, pretty_print=True)
                    self._extract_output_line(line, load_output)
        except EnvironmentError as exc:
            if exc.errno == errno.ENOENT:
                # The archive file does not exist; no daemon output to report.
                self.client.fail("Error opening archive {0} - {1}".format(self.path, to_native(exc)))
            # Other I/O errors: include whatever output was collected so far.
            self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output))
        except Exception as exc:
            self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output))

        # Collect loaded images
        # The daemon reports either 'Loaded image: <name:tag>' or
        # 'Loaded image ID: <sha256:...>' lines, one per loaded image.
        loaded_images = []
        for line in load_output:
            if line.startswith('Loaded image:'):
                loaded_images.append(line[len('Loaded image:'):].strip())
            if line.startswith('Loaded image ID:'):
                loaded_images.append(line[len('Loaded image ID:'):].strip())

        if not loaded_images:
            self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))

        # Resolve each reported name/ID to its inspection data.
        images = []
        for image_name in loaded_images:
            if is_image_name_id(image_name):
                images.append(self.client.find_image_by_id(image_name))
            elif ':' in image_name:
                image_name, tag = image_name.rsplit(':', 1)
                images.append(self.client.find_image(image_name, tag))
            else:
                self.client.module.warn('Image name "{0}" is neither ID nor has a tag'.format(image_name))

        self.results['image_names'] = loaded_images
        self.results['images'] = images
        # Loading an archive always counts as a change.
        self.results['changed'] = True
        self.results['stdout'] = '\n'.join(load_output)
+
+
def main():
    '''Entry point: load images from the given archive and exit the module.'''
    client = AnsibleDockerClient(
        argument_spec=dict(
            path=dict(type='path', required=True),
        ),
        supports_check_mode=False,
    )

    try:
        results = {
            'image_names': [],
            'images': [],
        }
        # ImageManager performs the load and fills in the results.
        ImageManager(client, results)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail(
            'An unexpected Docker error occurred: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_login.py b/ansible_collections/community/docker/plugins/modules/docker_login.py
new file mode 100644
index 00000000..360dd578
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_login.py
@@ -0,0 +1,451 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
+# Chris Houseknecht, <house@redhat.com>
+# James Tanner, <jtanner@redhat.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_login
+short_description: Log into a Docker registry.
+description:
+ - Provides functionality similar to the C(docker login) command.
+  - Authenticate with a docker registry and add the credentials to your local Docker config file, or to the
+    credential store associated with the registry. Storing the credentials this way allows future connections
+    to the registry using tools such as Ansible's Docker modules, the Docker CLI and the Docker SDK for Python
+    without needing to provide credentials.
+ - Running in check mode will perform the authentication without updating the config file.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ registry_url:
+ description:
+ - The registry URL.
+ type: str
+ default: "https://index.docker.io/v1/"
+ aliases:
+ - registry
+ - url
+ username:
+ description:
+ - The username for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ password:
+ description:
+ - The plaintext password for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ reauthorize:
+ description:
+ - Refresh existing authentication found in the configuration file.
+ type: bool
+ default: false
+ aliases:
+ - reauth
+ config_path:
+ description:
+ - Custom path to the Docker CLI configuration file.
+ type: path
+ default: ~/.docker/config.json
+ aliases:
+ - dockercfg_path
+ state:
+ description:
+ - This controls the current state of the user. C(present) will login in a user, C(absent) will log them out.
+ - To logout you only need the registry server, which defaults to DockerHub.
+ - Before 2.1 you could ONLY log in.
+ - Docker does not support 'logout' with a custom config file.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+
+requirements:
+ - "Docker API >= 1.25"
+author:
+ - Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Log into DockerHub
+ community.docker.docker_login:
+ username: docker
+ password: rekcod
+
+- name: Log into private registry and force re-authorization
+ community.docker.docker_login:
+ registry_url: your.private.registry.io
+ username: yourself
+ password: secrets3
+ reauthorize: true
+
+- name: Log into DockerHub using a custom config file
+ community.docker.docker_login:
+ username: docker
+ password: rekcod
+ config_path: /tmp/.mydockercfg
+
+- name: Log out of DockerHub
+ community.docker.docker_login:
+ state: absent
+'''
+
+RETURN = '''
+login_results:
+ description: Results from the login.
+ returned: when I(state=present)
+ type: dict
+ sample: {
+ "serveraddress": "localhost:5000",
+ "username": "testuser"
+ }
+'''
+
+import base64
+import json
+import os
+import traceback
+
+from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DEFAULT_DOCKER_REGISTRY,
+ DockerBaseClass,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api import auth
+from ansible_collections.community.docker.plugins.module_utils._api.auth import decode_auth
+from ansible_collections.community.docker.plugins.module_utils._api.credentials.errors import CredentialsNotFound
+from ansible_collections.community.docker.plugins.module_utils._api.credentials.store import Store
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+
+
class DockerFileStore(object):
    '''
    Minimal credential store that reads and writes the Docker CLI config
    file directly. Used to update the config file when no credential
    helper is configured for the registry.
    '''

    # Shown in log messages in place of a credential helper binary name.
    program = "<legacy config>"

    def __init__(self, config_path):
        self._config_path = config_path
        # Start from a minimal structure so 'auths' is always present.
        self._config = {'auths': {}}
        try:
            # Attempt to read the existing config.
            with open(self._config_path, "r") as handle:
                loaded = json.load(handle)
        except (ValueError, IOError):
            # Missing or unparsable config file: keep the minimal default.
            loaded = {}
        # Merge whatever was loaded over the minimal default.
        self._config.update(loaded)

    @property
    def config_path(self):
        '''Path of the Docker CLI config file backing this store.'''
        return self._config_path

    def get(self, server):
        '''
        Return the credentials stored for ``server``.

        :raises CredentialsNotFound: if the config has no entry for it.
        '''
        entry = self._config['auths'].get(server)
        if not entry:
            raise CredentialsNotFound('No matching credentials')
        username, password = decode_auth(entry['auth'])
        return {
            'Username': username,
            'Secret': password,
        }

    def _write(self):
        '''Persist the in-memory config to disk with file mode 0600.'''
        config_dir = os.path.dirname(self._config_path)
        if not os.path.exists(config_dir):
            os.makedirs(config_dir)
        serialized = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
        # os.open so the file is created with restrictive permissions.
        fd = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
        try:
            os.write(fd, serialized)
        finally:
            os.close(fd)

    def store(self, server, username, password):
        '''Record credentials for ``server`` and write the config file.'''
        encoded = base64.b64encode(
            to_bytes(username) + b':' + to_bytes(password)
        )
        if 'auths' not in self._config:
            self._config['auths'] = {}
        self._config['auths'][server] = {'auth': to_text(encoded)}
        self._write()

    def erase(self, server):
        '''Drop credentials for ``server`` (if present) and write the config file.'''
        if 'auths' in self._config and server in self._config['auths']:
            self._config['auths'].pop(server)
            self._write()
+
+
class LoginManager(DockerBaseClass):
    '''
    Implements the docker_login module logic: log in to a registry (and
    persist the credentials) or log out (remove stored credentials).
    '''

    def __init__(self, client, results):
        # :param client: AnsibleDockerClient connected to the Docker daemon
        # :param results: result dict to populate (mutated in place)
        super(LoginManager, self).__init__()

        self.client = client
        self.results = results
        parameters = self.client.module.params
        self.check_mode = self.client.check_mode

        self.registry_url = parameters.get('registry_url')
        self.username = parameters.get('username')
        self.password = parameters.get('password')
        self.reauthorize = parameters.get('reauthorize')
        self.config_path = parameters.get('config_path')
        self.state = parameters.get('state')

    def run(self):
        '''
        Do the actual work of this task here. This allows instantiation for partial
        testing.
        '''

        if self.state == 'present':
            self.login()
        else:
            self.logout()

    def fail(self, msg):
        # Delegate failure handling to the client (ends module execution).
        self.client.fail(msg)

    def _login(self, reauth):
        # Authenticate against the registry via the daemon's /auth endpoint.
        # :param reauth: when True, skip any cached auth config and re-authenticate
        # :returns: dict with the auth result (cached auth config or daemon response)
        if self.config_path and os.path.exists(self.config_path):
            self.client._auth_configs = auth.load_config(
                self.config_path, credstore_env=self.client.credstore_env
            )
        elif not self.client._auth_configs or self.client._auth_configs.is_empty:
            # Fall back to the default config locations.
            self.client._auth_configs = auth.load_config(
                credstore_env=self.client.credstore_env
            )

        authcfg = self.client._auth_configs.resolve_authconfig(self.registry_url)
        # If we found an existing auth config for this registry and username
        # combination, we can return it immediately unless reauth is requested.
        if authcfg and authcfg.get('username', None) == self.username \
                and not reauth:
            return authcfg

        req_data = {
            'username': self.username,
            'password': self.password,
            'email': None,
            'serveraddress': self.registry_url,
        }

        response = self.client._post_json(self.client._url('/auth'), data=req_data)
        if response.status_code == 200:
            self.client._auth_configs.add_auth(self.registry_url or auth.INDEX_NAME, req_data)
        # NOTE(review): on a non-200 status this falls through and returns None —
        # presumably _post_json/_result raise for error statuses before that can
        # happen; confirm against the client implementation.
        return self.client._result(response, json=True)

    def login(self):
        '''
        Log into the registry with provided username/password. On success update the config
        file with the new authorization.

        :return: None
        '''

        self.results['actions'].append("Logged into %s" % (self.registry_url))
        self.log("Log into %s with username %s" % (self.registry_url, self.username))
        try:
            response = self._login(self.reauthorize)
        except Exception as exc:
            self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc)))

        # If user is already logged in, then response contains password for user
        if 'password' in response:
            # This returns correct password if user is logged in and wrong password is given.
            # So if it returns another password as we passed, and the user didn't request to
            # reauthorize, still do it.
            if not self.reauthorize and response['password'] != self.password:
                try:
                    response = self._login(True)
                except Exception as exc:
                    self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc)))
            # Never expose the password in the module result.
            response.pop('password', None)
        self.results['login_result'] = response

        self.update_credentials()

    def logout(self):
        '''
        Log out of the registry. On success update the config file.

        :return: None
        '''

        # Get the configuration store.
        store = self.get_credential_store_instance(self.registry_url, self.config_path)

        try:
            store.get(self.registry_url)
        except CredentialsNotFound:
            # get raises an exception on not found.
            self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
            self.results['changed'] = False
            return

        # Credentials exist: remove them (unless running in check mode).
        if not self.check_mode:
            store.erase(self.registry_url)
        self.results['changed'] = True

    def update_credentials(self):
        '''
        If the authorization is not stored attempt to store authorization values via
        the appropriate credential helper or to the config file.

        :return: None
        '''

        # Check to see if credentials already exist.
        store = self.get_credential_store_instance(self.registry_url, self.config_path)

        try:
            current = store.get(self.registry_url)
        except CredentialsNotFound:
            # get raises an exception on not found.
            current = dict(
                Username='',
                Secret=''
            )

        # Write only when something actually differs, or reauthorization was requested.
        if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
            if not self.check_mode:
                store.store(self.registry_url, self.username, self.password)
            self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
            self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
                store.program, self.registry_url))
            self.results['changed'] = True

    def get_credential_store_instance(self, registry, dockercfg_path):
        '''
        Return an instance of docker.credentials.Store used by the given registry.

        :return: A Store or None
        :rtype: Union[docker.credentials.Store, NoneType]
        '''

        credstore_env = self.client.credstore_env

        config = auth.load_config(config_path=dockercfg_path)

        store_name = auth.get_credential_store(config, registry)

        # Make sure that there is a credential helper before trying to instantiate a
        # Store object.
        if store_name:
            self.log("Found credential store %s" % store_name)
            return Store(store_name, environment=credstore_env)

        # No helper configured: fall back to writing the config file directly.
        return DockerFileStore(dockercfg_path)
+
+
def main():
    '''Entry point: parse parameters, run the login/logout flow and exit.'''
    client = AnsibleDockerClient(
        argument_spec=dict(
            registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
            username=dict(type='str'),
            password=dict(type='str', no_log=True),
            reauthorize=dict(type='bool', default=False, aliases=['reauth']),
            state=dict(type='str', default='present', choices=['present', 'absent']),
            config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
        ),
        supports_check_mode=True,
        required_if=[
            ('state', 'present', ['username', 'password']),
        ],
    )

    try:
        results = dict(
            changed=False,
            actions=[],
            login_result={}
        )

        LoginManager(client, results).run()

        # 'actions' is internal bookkeeping only; drop it from the module output.
        results.pop('actions', None)
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_network.py b/ansible_collections/community/docker/plugins/modules/docker_network.py
new file mode 100644
index 00000000..db932363
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_network.py
@@ -0,0 +1,679 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_network
+short_description: Manage Docker networks
+description:
+ - Create/remove Docker networks and connect containers to them.
+ - Performs largely the same function as the C(docker network) CLI subcommand.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ name:
+ description:
+ - Name of the network to operate on.
+ type: str
+ required: true
+ aliases:
+ - network_name
+
+ connected:
+ description:
+ - List of container names or container IDs to connect to a network.
+ - Please note that the module only makes sure that these containers are connected to the network,
+ but does not care about connection options. If you rely on specific IP addresses etc., use the
+ M(community.docker.docker_container) module to ensure your containers are correctly connected to this network.
+ type: list
+ elements: str
+ default: []
+ aliases:
+ - containers
+
+ driver:
+ description:
+ - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
+ type: str
+ default: bridge
+
+ driver_options:
+ description:
+ - Dictionary of network settings. Consult docker docs for valid options and values.
+ type: dict
+ default: {}
+
+ force:
+ description:
+ - With state C(absent) forces disconnecting all containers from the
+ network prior to deleting the network. With state C(present) will
+ disconnect all containers, delete the network and re-create the
+ network.
+ - This option is required if you have changed the IPAM or driver options
+ and want an existing network to be updated to use the new options.
+ type: bool
+ default: false
+
+ appends:
+ description:
+ - By default the connected list is canonical, meaning containers not on the list are removed from the network.
+ - Use I(appends) to leave existing containers connected.
+ type: bool
+ default: false
+ aliases:
+ - incremental
+
+ enable_ipv6:
+ description:
+ - Enable IPv6 networking.
+ type: bool
+
+ ipam_driver:
+ description:
+ - Specify an IPAM driver.
+ type: str
+
+ ipam_driver_options:
+ description:
+ - Dictionary of IPAM driver options.
+ type: dict
+
+ ipam_config:
+ description:
+ - List of IPAM config blocks. Consult
+ L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
+ Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
+ type: list
+ elements: dict
+ suboptions:
+ subnet:
+ description:
+        - IP subnet in CIDR notation.
+ type: str
+ iprange:
+ description:
+ - IP address range in CIDR notation.
+ type: str
+ gateway:
+ description:
+ - IP gateway address.
+ type: str
+ aux_addresses:
+ description:
+ - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
+ type: dict
+
+ state:
+ description:
+ - C(absent) deletes the network. If a network has connected containers, it
+ cannot be deleted. Use the I(force) option to disconnect all containers
+ and delete the network.
+ - C(present) creates the network, if it does not already exist with the
+ specified parameters, and connects the list of containers provided via
+ the connected parameter. Containers not on the list will be disconnected.
+ An empty list will leave no containers connected to the network. Use the
+ I(appends) option to leave existing containers connected. Use the I(force)
+ options to force re-creation of the network.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+ internal:
+ description:
+ - Restrict external access to the network.
+ type: bool
+
+ labels:
+ description:
+ - Dictionary of labels.
+ type: dict
+ default: {}
+
+ scope:
+ description:
+ - Specify the network's scope.
+ type: str
+ choices:
+ - local
+ - global
+ - swarm
+
+ attachable:
+ description:
+ - If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
+ type: bool
+
+notes:
+ - When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network.
+    It does not try to reconnect containers, except the ones listed in I(connected), and even for these, it does not consider specific
+ connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
+ network, loop the M(community.docker.docker_container) module to loop over your containers to make sure they are connected properly.
+ - The module does not support Docker Swarm. This means that it will not try to disconnect or reconnect services. If services are connected to the
+ network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
+ fail as well.
+
+author:
+ - "Ben Keith (@keitwb)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Create a network
+ community.docker.docker_network:
+ name: network_one
+
+- name: Remove all but selected list of containers
+ community.docker.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ - container_b
+ - container_c
+
+- name: Remove a single container
+ community.docker.docker_network:
+ name: network_one
+ connected: "{{ fulllist|difference(['container_a']) }}"
+
+- name: Add a container to a network, leaving existing containers connected
+ community.docker.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ appends: true
+
+- name: Create a network with driver options
+ community.docker.docker_network:
+ name: network_two
+ driver_options:
+ com.docker.network.bridge.name: net2
+
+- name: Create a network with custom IPAM config
+ community.docker.docker_network:
+ name: network_three
+ ipam_config:
+ - subnet: 172.23.27.0/24
+ gateway: 172.23.27.2
+ iprange: 172.23.27.0/26
+ aux_addresses:
+ host1: 172.23.27.3
+ host2: 172.23.27.4
+
+- name: Create a network with labels
+ community.docker.docker_network:
+ name: network_four
+ labels:
+ key1: value1
+ key2: value2
+
+- name: Create a network with IPv6 IPAM config
+ community.docker.docker_network:
+ name: network_ipv6_one
+ enable_ipv6: true
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce1::/64
+
+- name: Create a network with IPv6 and custom IPv4 IPAM config
+ community.docker.docker_network:
+ name: network_ipv6_two
+ enable_ipv6: true
+ ipam_config:
+ - subnet: 172.24.27.0/24
+ - subnet: fdd1:ac8c:0557:7ce2::/64
+
+- name: Delete a network, disconnecting all containers
+ community.docker.docker_network:
+ name: network_one
+ state: absent
+ force: true
+'''
+
+RETURN = '''
+network:
+ description:
+ - Network inspection results for the affected network.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import re
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ DifferenceTracker,
+ clean_dict_booleans_for_docker_api,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.name = None
+ self.connected = None
+ self.driver = None
+ self.driver_options = None
+ self.ipam_driver = None
+ self.ipam_driver_options = None
+ self.ipam_config = None
+ self.appends = None
+ self.force = None
+ self.internal = None
+ self.labels = None
+ self.debug = None
+ self.enable_ipv6 = None
+ self.scope = None
+ self.attachable = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+def container_names_in_network(network):
+ return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
+
+
+CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
+CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[0-2][0-9])$')
+
+
+def validate_cidr(cidr):
+ """Validate CIDR. Return IP version of a CIDR string on success.
+
+ :param cidr: Valid CIDR
+ :type cidr: str
+ :return: ``ipv4`` or ``ipv6``
+ :rtype: str
+ :raises ValueError: If ``cidr`` is not a valid CIDR
+ """
+ if CIDR_IPV4.match(cidr):
+ return 'ipv4'
+ elif CIDR_IPV6.match(cidr):
+ return 'ipv6'
+ raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
+
+
+def normalize_ipam_config_key(key):
+ """Normalizes IPAM config keys returned by Docker API to match Ansible keys.
+
+ :param key: Docker API key
+ :type key: str
+    :return: Ansible module key
+    :rtype: str
+ """
+ special_cases = {
+ 'AuxiliaryAddresses': 'aux_addresses'
+ }
+ return special_cases.get(key, key.lower())
+
+
+def dicts_are_essentially_equal(a, b):
+ """Make sure that a is a subset of b, where None entries of a are ignored."""
+ for k, v in a.items():
+ if v is None:
+ continue
+ if b.get(k) != v:
+ return False
+ return True
+
+
+class DockerNetworkManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_network = self.get_existing_network()
+
+ if not self.parameters.connected and self.existing_network:
+ self.parameters.connected = container_names_in_network(self.existing_network)
+
+ if self.parameters.ipam_config:
+ try:
+ for ipam_config in self.parameters.ipam_config:
+ validate_cidr(ipam_config['subnet'])
+ except ValueError as e:
+ self.client.fail(to_native(e))
+
+ if self.parameters.driver_options:
+ self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_network(self):
+ return self.client.get_network(name=self.parameters.name)
+
+ def has_different_config(self, net):
+ '''
+ Evaluates an existing network and returns a tuple containing a boolean
+ indicating if the configuration is different and a list of differences.
+
+ :param net: the inspection output for an existing network
+ :return: (bool, list)
+ '''
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != net['Driver']:
+ differences.add('driver',
+ parameter=self.parameters.driver,
+ active=net['Driver'])
+ if self.parameters.driver_options:
+ if not net.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=net.get('Options'))
+ else:
+ for key, value in self.parameters.driver_options.items():
+ if not (key in net['Options']) or value != net['Options'][key]:
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=net['Options'].get(key))
+
+ if self.parameters.ipam_driver:
+ if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
+ differences.add('ipam_driver',
+ parameter=self.parameters.ipam_driver,
+ active=net.get('IPAM'))
+
+ if self.parameters.ipam_driver_options is not None:
+ ipam_driver_options = net['IPAM'].get('Options') or {}
+ if ipam_driver_options != self.parameters.ipam_driver_options:
+ differences.add('ipam_driver_options',
+ parameter=self.parameters.ipam_driver_options,
+ active=ipam_driver_options)
+
+ if self.parameters.ipam_config is not None and self.parameters.ipam_config:
+ if not net.get('IPAM') or not net['IPAM']['Config']:
+ differences.add('ipam_config',
+ parameter=self.parameters.ipam_config,
+ active=net.get('IPAM', {}).get('Config'))
+ else:
+ # Put network's IPAM config into the same format as module's IPAM config
+ net_ipam_configs = []
+ for net_ipam_config in net['IPAM']['Config']:
+ config = dict()
+ for k, v in net_ipam_config.items():
+ config[normalize_ipam_config_key(k)] = v
+ net_ipam_configs.append(config)
+ # Compare lists of dicts as sets of dicts
+ for idx, ipam_config in enumerate(self.parameters.ipam_config):
+ net_config = dict()
+ for net_ipam_config in net_ipam_configs:
+ if dicts_are_essentially_equal(ipam_config, net_ipam_config):
+ net_config = net_ipam_config
+ break
+ for key, value in ipam_config.items():
+ if value is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value != net_config.get(key):
+ differences.add('ipam_config[%s].%s' % (idx, key),
+ parameter=value,
+ active=net_config.get(key))
+
+ if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
+ differences.add('enable_ipv6',
+ parameter=self.parameters.enable_ipv6,
+ active=net.get('EnableIPv6', False))
+
+ if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
+ differences.add('internal',
+ parameter=self.parameters.internal,
+ active=net.get('Internal'))
+
+ if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
+ differences.add('scope',
+ parameter=self.parameters.scope,
+ active=net.get('Scope'))
+
+ if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
+ differences.add('attachable',
+ parameter=self.parameters.attachable,
+ active=net.get('Attachable'))
+ if self.parameters.labels:
+ if not net.get('Labels'):
+ differences.add('labels',
+ parameter=self.parameters.labels,
+ active=net.get('Labels'))
+ else:
+ for key, value in self.parameters.labels.items():
+ if not (key in net['Labels']) or value != net['Labels'][key]:
+ differences.add('labels.%s' % key,
+ parameter=value,
+ active=net['Labels'].get(key))
+
+ return not differences.empty, differences
+
+ def create_network(self):
+ if not self.existing_network:
+ data = {
+ 'Name': self.parameters.name,
+ 'Driver': self.parameters.driver,
+ 'Options': self.parameters.driver_options,
+ 'IPAM': None,
+ 'CheckDuplicate': None,
+ }
+
+ if self.parameters.enable_ipv6:
+ data['EnableIPv6'] = True
+ if self.parameters.internal:
+ data['Internal'] = True
+ if self.parameters.scope is not None:
+ data['Scope'] = self.parameters.scope
+ if self.parameters.attachable is not None:
+ data['Attachable'] = self.parameters.attachable
+ if self.parameters.labels is not None:
+ data["Labels"] = self.parameters.labels
+
+ ipam_pools = []
+ if self.parameters.ipam_config:
+ for ipam_pool in self.parameters.ipam_config:
+ ipam_pools.append({
+ 'Subnet': ipam_pool['subnet'],
+ 'IPRange': ipam_pool['iprange'],
+ 'Gateway': ipam_pool['gateway'],
+ 'AuxiliaryAddresses': ipam_pool['aux_addresses'],
+ })
+
+ if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
+ # Only add IPAM if a driver was specified or if IPAM parameters were
+ # specified. Leaving this parameter out can significantly speed up
+ # creation; on my machine creation with this option needs ~15 seconds,
+ # and without just a few seconds.
+ data['IPAM'] = {
+ 'Driver': self.parameters.ipam_driver,
+ 'Config': ipam_pools or [],
+ 'Options': self.parameters.ipam_driver_options,
+ }
+
+ if not self.check_mode:
+ resp = self.client.post_json_to_json('/networks/create', data=data)
+ self.client.report_warnings(resp, ['Warning'])
+ self.existing_network = self.client.get_network(network_id=resp['Id'])
+ self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_network(self):
+ if self.existing_network:
+ self.disconnect_all_containers()
+ if not self.check_mode:
+ self.client.delete_call('/networks/{0}', self.parameters.name)
+ self.results['actions'].append("Removed network %s" % (self.parameters.name,))
+ self.results['changed'] = True
+
+ def is_container_connected(self, container_name):
+ if not self.existing_network:
+ return False
+ return container_name in container_names_in_network(self.existing_network)
+
+ def connect_containers(self):
+ for name in self.parameters.connected:
+ if not self.is_container_connected(name):
+ if not self.check_mode:
+ data = {
+ "Container": name,
+ "EndpointConfig": None,
+ }
+ self.client.post_json('/networks/{0}/connect', self.parameters.name, data=data)
+ self.results['actions'].append("Connected container %s" % (name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(name), parameter=True, active=False)
+
+ def disconnect_missing(self):
+ if not self.existing_network:
+ return
+ containers = self.existing_network['Containers']
+ if not containers:
+ return
+ for c in containers.values():
+ name = c['Name']
+ if name not in self.parameters.connected:
+ self.disconnect_container(name)
+
+ def disconnect_all_containers(self):
+ containers = self.client.get_network(name=self.parameters.name)['Containers']
+ if not containers:
+ return
+ for cont in containers.values():
+ self.disconnect_container(cont['Name'])
+
+ def disconnect_container(self, container_name):
+ if not self.check_mode:
+ data = {"Container": container_name}
+ self.client.post_json('/networks/{0}/disconnect', self.parameters.name, data=data)
+ self.results['actions'].append("Disconnected container %s" % (container_name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(container_name),
+ parameter=False,
+ active=True)
+
+ def present(self):
+ different = False
+ differences = DifferenceTracker()
+ if self.existing_network:
+ different, differences = self.has_different_config(self.existing_network)
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
+ if self.parameters.force or different:
+ self.remove_network()
+ self.existing_network = None
+
+ self.create_network()
+ self.connect_containers()
+ if not self.parameters.appends:
+ self.disconnect_missing()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ network_facts = self.get_existing_network()
+ self.results['network'] = network_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
+ self.remove_network()
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['network_name']),
+ connected=dict(type='list', default=[], elements='str', aliases=['containers']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='bridge'),
+ driver_options=dict(type='dict', default={}),
+ force=dict(type='bool', default=False),
+ appends=dict(type='bool', default=False, aliases=['incremental']),
+ ipam_driver=dict(type='str'),
+ ipam_driver_options=dict(type='dict'),
+ ipam_config=dict(type='list', elements='dict', options=dict(
+ subnet=dict(type='str'),
+ iprange=dict(type='str'),
+ gateway=dict(type='str'),
+ aux_addresses=dict(type='dict'),
+ )),
+ enable_ipv6=dict(type='bool'),
+ internal=dict(type='bool'),
+ labels=dict(type='dict', default={}),
+ debug=dict(type='bool', default=False),
+ scope=dict(type='str', choices=['local', 'global', 'swarm']),
+ attachable=dict(type='bool'),
+ )
+
+ option_minimal_versions = dict(
+ scope=dict(docker_api_version='1.30'),
+ attachable=dict(docker_api_version='1.26'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ # "The docker server >= 1.10.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerNetworkManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_network_info.py b/ansible_collections/community/docker/plugins/modules/docker_network_info.py
new file mode 100644
index 00000000..9818baad
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_network_info.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_network_info
+
+short_description: Retrieves facts about docker network
+
+description:
+ - Retrieves facts about a docker network.
+ - Essentially returns the output of C(docker network inspect <name>), similar to what M(community.docker.docker_network)
+ returns for a non-absent network.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - The name of the network to inspect.
+      - When identifying an existing network, name may be a name or a long or short network ID.
+ type: str
+ required: true
+
+author:
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get infos on network
+ community.docker.docker_network_info:
+ name: mydata
+ register: result
+
+- name: Does network exist?
+ ansible.builtin.debug:
+ msg: "The network {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about network
+ ansible.builtin.debug:
+ var: result.network
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the network exists.
+ type: bool
+ returned: always
+ sample: true
+network:
+ description:
+ - Facts representing the current state of the network. Matches the docker inspection output.
+ - Will be C(none) if network does not exist.
+ returned: always
+ type: dict
+ sample: {
+ "Attachable": false,
+ "ConfigFrom": {
+ "Network": ""
+ },
+ "ConfigOnly": false,
+ "Containers": {},
+ "Created": "2018-12-07T01:47:51.250835114-06:00",
+ "Driver": "bridge",
+ "EnableIPv6": false,
+ "IPAM": {
+ "Config": [
+ {
+ "Gateway": "192.168.96.1",
+ "Subnet": "192.168.96.0/20"
+ }
+ ],
+ "Driver": "default",
+ "Options": null
+ },
+ "Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a",
+ "Ingress": false,
+ "Internal": false,
+ "Labels": {},
+ "Name": "ansible-test-f2700bba",
+ "Options": {},
+ "Scope": "local"
+ }
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ network = client.get_network(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=(True if network else False),
+ network=network,
+ )
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_node.py b/ansible_collections/community/docker/plugins/modules/docker_node.py
new file mode 100644
index 00000000..d097b07f
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_node.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_node
+short_description: Manage Docker Swarm node
+description:
+ - Manages the Docker nodes via Swarm Manager.
+  - This module allows you to change the node's role, its availability, and to modify, add or remove node labels.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ hostname:
+ description:
+ - The hostname or ID of node as registered in Swarm.
+ - If more than one node is registered using the same hostname the ID must be used,
+ otherwise module will fail.
+ type: str
+ required: true
+ labels:
+ description:
+ - User-defined key/value metadata that will be assigned as node attribute.
+ - Label operations in this module apply to the docker swarm node specified by I(hostname).
+ Use M(community.docker.docker_swarm) module to add/modify/remove swarm cluster labels.
+ - The actual state of labels assigned to the node when module completes its work depends on
+ I(labels_state) and I(labels_to_remove) parameters values. See description below.
+ type: dict
+ labels_state:
+ description:
+ - It defines the operation on the labels assigned to node and labels specified in I(labels) option.
+ - Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node.
+ If no labels are assigned then it will add listed labels. For labels that are already assigned
+ to the node, it will update their values. The labels not specified in I(labels) will remain unchanged.
+ If I(labels) is empty then no changes will be made.
+ - Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then
+ all labels assigned to the node will be removed.
+ type: str
+ default: 'merge'
+ choices:
+ - merge
+ - replace
+ labels_to_remove:
+ description:
+ - List of labels that will be removed from the node configuration. The list has to contain only label
+ names, not their values.
+ - If the label provided on the list is not assigned to the node, the entry is ignored.
+ - If the label is both on the I(labels_to_remove) and I(labels), then value provided in I(labels) remains
+ assigned to the node.
+ - If I(labels_state) is C(replace) and I(labels) is not provided or empty then all labels assigned to
+ node are removed and I(labels_to_remove) is ignored.
+ type: list
+ elements: str
+ availability:
+ description: Node availability to assign. If not provided then node availability remains unchanged.
+ choices:
+ - active
+ - pause
+ - drain
+ type: str
+ role:
+ description: Node role to assign. If not provided then node role remains unchanged.
+ choices:
+ - manager
+ - worker
+ type: str
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - Docker API >= 1.25
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+ - Thierry Bouvet (@tbouvet)
+
+'''
+
+EXAMPLES = '''
+- name: Set node role
+ community.docker.docker_node:
+ hostname: mynode
+ role: manager
+
+- name: Set node availability
+ community.docker.docker_node:
+ hostname: mynode
+ availability: drain
+
+- name: Replace node labels with new labels
+ community.docker.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+ labels_state: replace
+
+- name: Merge node labels and new labels
+ community.docker.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+
+- name: Remove all labels assigned to node
+ community.docker.docker_node:
+ hostname: mynode
+ labels_state: replace
+
+- name: Remove selected labels from the node
+ community.docker.docker_node:
+ hostname: mynode
+ labels_to_remove:
+ - key1
+ - key2
+'''
+
+RETURN = '''
+node:
+ description: Information about node after 'update' operation
+ returned: success
+ type: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ DockerBaseClass,
+ RequestException,
+)
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+
+ # Spec
+ self.name = None
+ self.labels = None
+ self.labels_state = None
+ self.labels_to_remove = None
+
+ # Node
+ self.availability = None
+ self.role = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+class SwarmNodeManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmNodeManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.parameters = TaskParameters(client)
+
+ self.node_update()
+
+ def node_update(self):
+ if not (self.client.check_if_swarm_node(node_id=self.parameters.hostname)):
+ self.client.fail("This node is not part of a swarm.")
+ return
+
+ if self.client.check_if_swarm_node_is_down():
+ self.client.fail("Can not update the node. The node is down.")
+
+ try:
+ node_info = self.client.inspect_node(node_id=self.parameters.hostname)
+ except APIError as exc:
+ self.client.fail("Failed to get node information for %s" % to_native(exc))
+
+ changed = False
+ node_spec = dict(
+ Availability=self.parameters.availability,
+ Role=self.parameters.role,
+ Labels=self.parameters.labels,
+ )
+
+ if self.parameters.role is None:
+ node_spec['Role'] = node_info['Spec']['Role']
+ else:
+ if not node_info['Spec']['Role'] == self.parameters.role:
+ node_spec['Role'] = self.parameters.role
+ changed = True
+
+ if self.parameters.availability is None:
+ node_spec['Availability'] = node_info['Spec']['Availability']
+ else:
+ if not node_info['Spec']['Availability'] == self.parameters.availability:
+ node_info['Spec']['Availability'] = self.parameters.availability
+ changed = True
+
+ if self.parameters.labels_state == 'replace':
+ if self.parameters.labels is None:
+ node_spec['Labels'] = {}
+ if node_info['Spec']['Labels']:
+ changed = True
+ else:
+ if (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
+ node_spec['Labels'] = self.parameters.labels
+ changed = True
+ elif self.parameters.labels_state == 'merge':
+ node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
+ if self.parameters.labels is not None:
+ for key, value in self.parameters.labels.items():
+ if node_spec['Labels'].get(key) != value:
+ node_spec['Labels'][key] = value
+ changed = True
+
+ if self.parameters.labels_to_remove is not None:
+ for key in self.parameters.labels_to_remove:
+ if self.parameters.labels is not None:
+ if not self.parameters.labels.get(key):
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+ else:
+ self.client.module.warn(
+ "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
+ "Keeping the assigned label value."
+ % to_native(key))
+ else:
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+
+ if changed is True:
+ if not self.check_mode:
+ try:
+ self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
+ node_spec=node_spec)
+ except APIError as exc:
+ self.client.fail("Failed to update node : %s" % to_native(exc))
+ self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
+ self.results['changed'] = changed
+ else:
+ self.results['node'] = node_info
+ self.results['changed'] = changed
+
+
+def main():
+ argument_spec = dict(
+ hostname=dict(type='str', required=True),
+ labels=dict(type='dict'),
+ labels_state=dict(type='str', default='merge', choices=['merge', 'replace']),
+ labels_to_remove=dict(type='list', elements='str'),
+ availability=dict(type='str', choices=['active', 'pause', 'drain']),
+ role=dict(type='str', choices=['worker', 'manager']),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ SwarmNodeManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_node_info.py b/ansible_collections/community/docker/plugins/modules/docker_node_info.py
new file mode 100644
index 00000000..d943db31
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_node_info.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_node_info
+
+short_description: Retrieves facts about docker swarm node from Swarm Manager
+
+description:
+ - Retrieves facts about a docker node.
+ - Essentially returns the output of C(docker node inspect <name>).
+ - Must be executed on a host running as Swarm Manager, otherwise the module will fail.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - The name of the node to inspect.
+      - The list of node names to inspect.
+ - If empty then return information of all nodes in Swarm cluster.
+ - When identifying the node use either the hostname of the node (as registered in Swarm) or node ID.
+ - If I(self) is C(true) then this parameter is ignored.
+ type: list
+ elements: str
+ self:
+ description:
+ - If C(true), queries the node (that is, the docker daemon) the module communicates with.
+ - If C(true) then I(name) is ignored.
+ - If C(false) then query depends on I(name) presence and value.
+ type: bool
+ default: false
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get info on all nodes
+ community.docker.docker_node_info:
+ register: result
+
+- name: Get info on node
+ community.docker.docker_node_info:
+ name: mynode
+ register: result
+
+- name: Get info on list of nodes
+ community.docker.docker_node_info:
+ name:
+ - mynode1
+ - mynode2
+ register: result
+
+- name: Get info on host if it is Swarm Manager
+ community.docker.docker_node_info:
+ self: true
+ register: result
+'''
+
+RETURN = '''
+nodes:
+ description:
+ - Facts representing the current state of the nodes. Matches the C(docker node inspect) output.
+ - Can contain multiple entries if more than one node provided in I(name), or I(name) is not provided.
+ - If I(name) contains a list of nodes, the output will provide information on all nodes registered
+ at the swarm, including nodes that left the swarm but have not been removed from the cluster on swarm
+ managers and nodes that are unreachable.
+ returned: always
+ type: list
+ elements: dict
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_node_facts(client):
+    """
+    Return inspection data for the requested swarm node(s).
+
+    Resolution order: I(self) wins over I(name); with neither set,
+    information about all nodes in the swarm is returned.
+    """
+
+    results = []
+
+    if client.module.params['self'] is True:
+        # Inspect the node the docker daemon we are talking to runs on.
+        self_node_id = client.get_swarm_node_id()
+        node_info = client.get_node_inspect(node_id=self_node_id)
+        results.append(node_info)
+        return results
+
+    if client.module.params['name'] is None:
+        node_info = client.get_all_nodes_inspect()
+        return node_info
+
+    nodes = client.module.params['name']
+    # Defensive: argument_spec declares 'name' as type=list, so this should
+    # already be a list; kept for safety.
+    if not isinstance(nodes, list):
+        nodes = [nodes]
+
+    for next_node_name in nodes:
+        # skip_missing=True: unknown node names are silently omitted
+        # instead of failing the task.
+        next_node_info = client.get_node_inspect(node_id=next_node_name, skip_missing=True)
+        if next_node_info:
+            results.append(next_node_info)
+    return results
+
+
+def main():
+    argument_spec = dict(
+        name=dict(type='list', elements='str'),
+        self=dict(type='bool', default=False),
+    )
+
+    client = AnsibleDockerSwarmClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        min_docker_version='2.4.0',
+    )
+
+    # Node inspection only works against a swarm manager; fail early otherwise.
+    client.fail_task_if_not_swarm_manager()
+
+    try:
+        nodes = get_node_facts(client)
+
+        # Pure info module: never reports a change.
+        client.module.exit_json(
+            changed=False,
+            nodes=nodes,
+        )
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail(
+            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+            exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_plugin.py b/ansible_collections/community/docker/plugins/modules/docker_plugin.py
new file mode 100644
index 00000000..1d46e3a8
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_plugin.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright (c) 2021 Red Hat | Ansible, Sakar Mehra <sakarmehra100@gmail.com> (@sakar97)
+# Copyright (c) 2019, Vladimir Porshkevich (@porshkevich) <neosonic@mail.ru>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_plugin
+short_description: Manage Docker plugins
+version_added: 1.3.0
+description:
+ - This module allows to install, delete, enable and disable Docker plugins.
+ - Performs largely the same function as the C(docker plugin) CLI subcommand.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ plugin_name:
+ description:
+ - Name of the plugin to operate on.
+ required: true
+ type: str
+
+ state:
+ description:
+      - C(absent) removes the plugin.
+      - C(present) installs the plugin, if it does not already exist.
+      - C(enable) enables the plugin.
+      - C(disable) disables the plugin.
+ default: present
+ choices:
+ - absent
+ - present
+ - enable
+ - disable
+ type: str
+
+ alias:
+ description:
+ - Local name for plugin.
+ type: str
+ version_added: 1.8.0
+
+ plugin_options:
+ description:
+ - Dictionary of plugin settings.
+ type: dict
+ default: {}
+
+ force_remove:
+ description:
+ - Remove even if the plugin is enabled.
+ default: False
+ type: bool
+
+ enable_timeout:
+ description:
+ - Timeout in seconds.
+ type: int
+ default: 0
+
+author:
+ - Sakar Mehra (@sakar97)
+ - Vladimir Porshkevich (@porshkevich)
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Install a plugin
+ community.docker.docker_plugin:
+ plugin_name: plugin_one
+ state: present
+
+- name: Remove a plugin
+ community.docker.docker_plugin:
+ plugin_name: plugin_one
+ state: absent
+
+- name: Enable the plugin
+ community.docker.docker_plugin:
+ plugin_name: plugin_one
+ state: enable
+
+- name: Disable the plugin
+ community.docker.docker_plugin:
+ plugin_name: plugin_one
+ state: disable
+
+- name: Install a plugin with options
+ community.docker.docker_plugin:
+ plugin_name: weaveworks/net-plugin:latest_release
+ plugin_options:
+ IPALLOC_RANGE: "10.32.0.0/12"
+ WEAVE_PASSWORD: "PASSWORD"
+'''
+
+RETURN = '''
+plugin:
+ description:
+ - Plugin inspection results for the affected plugin.
+ returned: success
+ type: dict
+ sample: {}
+actions:
+ description:
+ - List of actions performed during task execution.
+ returned: when I(state!=absent)
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ DifferenceTracker,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api import auth
+from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound
+
+
+class TaskParameters(DockerBaseClass):
+    # Thin holder that mirrors every module parameter as an attribute.
+    def __init__(self, client):
+        super(TaskParameters, self).__init__()
+        self.client = client
+        # The explicit None defaults below only document the expected
+        # attributes; the loop overwrites them from client.module.params.
+        self.plugin_name = None
+        self.alias = None
+        self.plugin_options = None
+        self.debug = None
+        self.force_remove = None
+        self.enable_timeout = None
+
+        for key, value in client.module.params.items():
+            setattr(self, key, value)
+
+
+def prepare_options(options):
+    # Convert an options dict into Docker's ['key=value', ...] list form;
+    # None values are rendered as empty strings ('key=').
+    return ['%s=%s' % (k, v if v is not None else "") for k, v in options.items()] if options else []
+
+
+def parse_options(options_list):
+    # Inverse of prepare_options: turn ['key=value', ...] back into a dict.
+    # Splits on the first '=' only, so values may themselves contain '='.
+    return dict(x.split('=', 1) for x in options_list) if options_list else {}
+
+
+class DockerPluginManager(object):
+    # Executes the requested plugin state change on construction and records
+    # the outcome on the instance (actions, changed, diff).
+
+    def __init__(self, client):
+        self.client = client
+
+        self.parameters = TaskParameters(client)
+        # Local name used for all API calls: the alias if given, otherwise
+        # the remote plugin name.
+        self.preferred_name = self.parameters.alias or self.parameters.plugin_name
+        self.check_mode = self.client.check_mode
+        self.diff = self.client.module._diff
+        self.diff_tracker = DifferenceTracker()
+        self.diff_result = dict()
+
+        self.actions = []
+        self.changed = False
+
+        self.existing_plugin = self.get_existing_plugin()
+
+        state = self.parameters.state
+        if state == 'present':
+            self.present()
+        elif state == 'absent':
+            self.absent()
+        elif state == 'enable':
+            self.enable()
+        elif state == 'disable':
+            self.disable()
+
+        if self.diff or self.check_mode or self.parameters.debug:
+            if self.diff:
+                self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+            # NOTE(review): self.diff flips here from a bool (diff-mode flag)
+            # to the diff dict; the 'result' property relies on this final value.
+            self.diff = self.diff_result
+
+    def get_existing_plugin(self):
+        # Inspect the plugin by its local (alias or remote) name.
+        # Returns the inspection dict, or None if the plugin is not installed.
+        try:
+            return self.client.get_json('/plugins/{0}/json', self.preferred_name)
+        except NotFound:
+            return None
+        except APIError as e:
+            self.client.fail(to_native(e))
+
+ def has_different_config(self):
+ """
+ Return the list of differences between the current parameters and the existing plugin.
+
+ :return: list of options that differ
+ """
+ differences = DifferenceTracker()
+ if self.parameters.plugin_options:
+ settings = self.existing_plugin.get('Settings')
+ if not settings:
+ differences.add('plugin_options', parameters=self.parameters.plugin_options, active=settings)
+ else:
+ existing_options = parse_options(settings.get('Env'))
+
+ for key, value in self.parameters.plugin_options.items():
+ if ((not existing_options.get(key) and value) or
+ not value or
+ value != existing_options[key]):
+ differences.add('plugin_options.%s' % key,
+ parameter=value,
+ active=existing_options.get(key))
+
+ return differences
+
+    def install_plugin(self):
+        """Pull and configure the plugin if it is not installed yet (idempotent)."""
+        if not self.existing_plugin:
+            if not self.check_mode:
+                try:
+                    # Get privileges
+                    headers = {}
+                    registry, repo_name = auth.resolve_repository_name(self.parameters.plugin_name)
+                    header = auth.get_config_header(self.client, registry)
+                    if header:
+                        headers['X-Registry-Auth'] = header
+                    privileges = self.client.get_json('/plugins/privileges', params={'remote': self.parameters.plugin_name}, headers=headers)
+                    # Pull plugin
+                    params = {
+                        'remote': self.parameters.plugin_name,
+                    }
+                    if self.parameters.alias:
+                        params['name'] = self.parameters.alias
+                    response = self.client._post_json(self.client._url('/plugins/pull'), params=params, headers=headers, data=privileges, stream=True)
+                    self.client._raise_for_status(response)
+                    # Drain the progress stream so the pull has finished
+                    # before we inspect/configure the plugin.
+                    for data in self.client._stream_helper(response, decode=True):
+                        pass
+                    # Inspect and configure plugin
+                    self.existing_plugin = self.client.get_json('/plugins/{0}/json', self.preferred_name)
+                    if self.parameters.plugin_options:
+                        data = prepare_options(self.parameters.plugin_options)
+                        self.client.post_json('/plugins/{0}/set', self.preferred_name, data=data)
+                except APIError as e:
+                    self.client.fail(to_native(e))
+
+            # Recorded even in check mode: the plugin would have been installed.
+            self.actions.append("Installed plugin %s" % self.preferred_name)
+            self.changed = True
+
+    def remove_plugin(self):
+        """Delete the plugin if present; I(force_remove) removes even enabled plugins."""
+        force = self.parameters.force_remove
+        if self.existing_plugin:
+            if not self.check_mode:
+                try:
+                    self.client.delete_call('/plugins/{0}', self.preferred_name, params={'force': force})
+                except APIError as e:
+                    self.client.fail(to_native(e))
+
+            self.actions.append("Removed plugin %s" % self.preferred_name)
+            self.changed = True
+
+    def update_plugin(self):
+        """Push new plugin settings when the requested options differ from the active ones."""
+        if self.existing_plugin:
+            differences = self.has_different_config()
+            if not differences.empty:
+                if not self.check_mode:
+                    try:
+                        data = prepare_options(self.parameters.plugin_options)
+                        self.client.post_json('/plugins/{0}/set', self.preferred_name, data=data)
+                    except APIError as e:
+                        self.client.fail(to_native(e))
+                self.actions.append("Updated plugin %s settings" % self.preferred_name)
+                self.changed = True
+        else:
+            # Defensive: present() only calls this when the plugin exists.
+            self.client.fail("Cannot update the plugin: Plugin does not exist")
+
+    def present(self):
+        """Install the plugin if missing, otherwise reconcile its settings."""
+        differences = DifferenceTracker()
+        if self.existing_plugin:
+            differences = self.has_different_config()
+
+        self.diff_tracker.add('exists', parameter=True, active=self.existing_plugin is not None)
+
+        if self.existing_plugin:
+            self.update_plugin()
+        else:
+            self.install_plugin()
+
+        if self.diff or self.check_mode or self.parameters.debug:
+            self.diff_tracker.merge(differences)
+
+        # NOTE(review): actions are discarded for normal state=present runs
+        # (kept only in check mode or with debug), while absent/enable/disable
+        # always keep them — confirm this asymmetry is intended.
+        if not self.check_mode and not self.parameters.debug:
+            self.actions = None
+
+    def absent(self):
+        # state=absent simply delegates to the removal logic.
+        self.remove_plugin()
+
+    def enable(self):
+        """Enable the plugin, installing it first if it is missing."""
+        timeout = self.parameters.enable_timeout
+        if self.existing_plugin:
+            if not self.existing_plugin.get('Enabled'):
+                if not self.check_mode:
+                    try:
+                        self.client.post_json('/plugins/{0}/enable', self.preferred_name, params={'timeout': timeout})
+                    except APIError as e:
+                        self.client.fail(to_native(e))
+                self.actions.append("Enabled plugin %s" % self.preferred_name)
+                self.changed = True
+        else:
+            # Plugin not installed: install, then enable. The enable call is
+            # duplicated from the branch above rather than factored out.
+            self.install_plugin()
+            if not self.check_mode:
+                try:
+                    self.client.post_json('/plugins/{0}/enable', self.preferred_name, params={'timeout': timeout})
+                except APIError as e:
+                    self.client.fail(to_native(e))
+            self.actions.append("Enabled plugin %s" % self.preferred_name)
+            self.changed = True
+
+    def disable(self):
+        """Disable an installed, enabled plugin; fail if the plugin does not exist."""
+        if self.existing_plugin:
+            if self.existing_plugin.get('Enabled'):
+                if not self.check_mode:
+                    try:
+                        self.client.post_json('/plugins/{0}/disable', self.preferred_name)
+                    except APIError as e:
+                        self.client.fail(to_native(e))
+                self.actions.append("Disable plugin %s" % self.preferred_name)
+                self.changed = True
+        else:
+            # Unlike enable(), disabling a missing plugin is an error.
+            self.client.fail("Plugin not found: Plugin does not exist.")
+
+    @property
+    def result(self):
+        # Build the module result; keys whose value is None are dropped
+        # (e.g. 'actions' after present() cleared it).
+        plugin_data = {}
+        if self.parameters.state != 'absent':
+            try:
+                plugin_data = self.client.get_json('/plugins/{0}/json', self.preferred_name)
+            except NotFound:
+                # This can happen in check mode
+                pass
+        result = {
+            'actions': self.actions,
+            'changed': self.changed,
+            'diff': self.diff,
+            'plugin': plugin_data,
+        }
+        return dict((k, v) for k, v in result.items() if v is not None)
+
+
+def main():
+    argument_spec = dict(
+        alias=dict(type='str'),
+        plugin_name=dict(type='str', required=True),
+        state=dict(type='str', default='present', choices=['present', 'absent', 'enable', 'disable']),
+        plugin_options=dict(type='dict', default={}),
+        debug=dict(type='bool', default=False),
+        force_remove=dict(type='bool', default=False),
+        enable_timeout=dict(type='int', default=0),
+    )
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+    )
+
+    try:
+        # DockerPluginManager performs the requested action in its constructor.
+        cm = DockerPluginManager(client)
+        client.module.exit_json(**cm.result)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail(
+            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+            exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_prune.py b/ansible_collections/community/docker/plugins/modules/docker_prune.py
new file mode 100644
index 00000000..1557f85a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_prune.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_prune
+
+short_description: Allows to prune various docker objects
+
+description:
+ - Allows to run C(docker container prune), C(docker image prune), C(docker network prune)
+ and C(docker volume prune) via the Docker API.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ containers:
+ description:
+ - Whether to prune containers.
+ type: bool
+ default: false
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to delete.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to prune images.
+ type: bool
+ default: false
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to delete.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to prune networks.
+ type: bool
+ default: false
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to prune volumes.
+ type: bool
+ default: false
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ builder_cache:
+ description:
+ - Whether to prune the builder cache.
+ type: bool
+ default: false
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+notes:
+ - The module always returned C(changed=false) before community.docker 3.5.1.
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Prune containers older than 24h
+ community.docker.docker_prune:
+ containers: true
+ containers_filters:
+ # only consider containers created more than 24 hours ago
+ until: 24h
+
+- name: Prune everything
+ community.docker.docker_prune:
+ containers: true
+ images: true
+ networks: true
+ volumes: true
+ builder_cache: true
+
+- name: Prune everything (including non-dangling images)
+ community.docker.docker_prune:
+ containers: true
+ images: true
+ images_filters:
+ dangling: false
+ networks: true
+ volumes: true
+ builder_cache: true
+'''
+
+RETURN = '''
+# containers
+containers:
+ description:
+ - List of IDs of deleted containers.
+ returned: I(containers) is C(true)
+ type: list
+ elements: str
+ sample: []
+containers_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from container pruning in bytes.
+ returned: I(containers) is C(true)
+ type: int
+ sample: 0
+
+# images
+images:
+ description:
+ - List of IDs of deleted images.
+ returned: I(images) is C(true)
+ type: list
+ elements: str
+ sample: []
+images_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from image pruning in bytes.
+ returned: I(images) is C(true)
+ type: int
+ sample: 0
+
+# networks
+networks:
+ description:
+ - List of IDs of deleted networks.
+ returned: I(networks) is C(true)
+ type: list
+ elements: str
+ sample: []
+
+# volumes
+volumes:
+ description:
+ - List of IDs of deleted volumes.
+ returned: I(volumes) is C(true)
+ type: list
+ elements: str
+ sample: []
+volumes_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from volumes pruning in bytes.
+ returned: I(volumes) is C(true)
+ type: int
+ sample: 0
+
+# builder_cache
+builder_cache_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from builder cache pruning in bytes.
+ returned: I(builder_cache) is C(true)
+ type: int
+ sample: 0
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import clean_dict_booleans_for_docker_api
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import convert_filters
+
+
+def main():
+    argument_spec = dict(
+        containers=dict(type='bool', default=False),
+        containers_filters=dict(type='dict'),
+        images=dict(type='bool', default=False),
+        images_filters=dict(type='dict'),
+        networks=dict(type='bool', default=False),
+        networks_filters=dict(type='dict'),
+        volumes=dict(type='bool', default=False),
+        volumes_filters=dict(type='dict'),
+        builder_cache=dict(type='bool', default=False),
+    )
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        # Check mode is deliberately unsupported (DOCUMENTATION: check_mode
+        # support: none) — the prune APIs have no dry-run.
+        # supports_check_mode=True,
+    )
+
+    try:
+        result = dict()
+        changed = False
+
+        # Each prune type runs independently; 'changed' is set as soon as
+        # any of them actually deleted something or reclaimed space.
+        if client.module.params['containers']:
+            filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
+            params = {'filters': convert_filters(filters)}
+            res = client.post_to_json('/containers/prune', params=params)
+            result['containers'] = res.get('ContainersDeleted') or []
+            result['containers_space_reclaimed'] = res['SpaceReclaimed']
+            if result['containers'] or result['containers_space_reclaimed']:
+                changed = True
+
+        if client.module.params['images']:
+            filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
+            params = {'filters': convert_filters(filters)}
+            res = client.post_to_json('/images/prune', params=params)
+            result['images'] = res.get('ImagesDeleted') or []
+            result['images_space_reclaimed'] = res['SpaceReclaimed']
+            if result['images'] or result['images_space_reclaimed']:
+                changed = True
+
+        if client.module.params['networks']:
+            filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
+            params = {'filters': convert_filters(filters)}
+            res = client.post_to_json('/networks/prune', params=params)
+            # Network pruning reports no SpaceReclaimed in the Engine API.
+            result['networks'] = res.get('NetworksDeleted') or []
+            if result['networks']:
+                changed = True
+
+        if client.module.params['volumes']:
+            filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
+            params = {'filters': convert_filters(filters)}
+            res = client.post_to_json('/volumes/prune', params=params)
+            result['volumes'] = res.get('VolumesDeleted') or []
+            result['volumes_space_reclaimed'] = res['SpaceReclaimed']
+            if result['volumes'] or result['volumes_space_reclaimed']:
+                changed = True
+
+        if client.module.params['builder_cache']:
+            res = client.post_to_json('/build/prune')
+            result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']
+            if result['builder_cache_space_reclaimed']:
+                changed = True
+
+        result['changed'] = changed
+        client.module.exit_json(**result)
+    except DockerException as e:
+        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail(
+            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+            exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_secret.py b/ansible_collections/community/docker/plugins/modules/docker_secret.py
new file mode 100644
index 00000000..546756a4
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_secret.py
@@ -0,0 +1,406 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_secret
+
+short_description: Manage docker secrets
+
+description:
+ - Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
+ - Adds to the metadata of new secrets C(ansible_key), an encrypted hash representation of the data, which is then used
+ in future runs to test if a secret has changed. If C(ansible_key) is not present, then a secret will not be updated
+ unless the I(force) option is set.
+ - Updates to secrets are performed by removing the secret and creating it again.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_2_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ data:
+ description:
+ - The value of the secret.
+ - Mutually exclusive with I(data_src). One of I(data) and I(data_src) is required if I(state=present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: false
+ data_src:
+ description:
+ - The file on the target from which to read the secret.
+ - Mutually exclusive with I(data). One of I(data) and I(data_src) is required if I(state=present).
+ type: path
+ version_added: 1.10.0
+ labels:
+ description:
+ - "A map of key:value meta data, where both key and value are expected to be strings."
+ - If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing secret.
+ - If C(true), an existing secret will be replaced, even if it has not changed.
+ type: bool
+ default: false
+ rolling_versions:
+ description:
+ - If set to C(true), secrets are created with an increasing version number appended to their name.
+ - Adds a label containing the version number to the managed secrets with the name C(ansible_version).
+ type: bool
+ default: false
+ version_added: 2.2.0
+ versions_to_keep:
+ description:
+ - When using I(rolling_versions), the number of old versions of the secret to keep.
+ - Extraneous old secrets are deleted after the new one is created.
+ - Set to C(-1) to keep everything or to C(0) or C(1) to keep only the current one.
+ type: int
+ default: 5
+ version_added: 2.2.0
+ name:
+ description:
+ - The name of the secret.
+ type: str
+ required: true
+ state:
+ description:
+ - Set to C(present), if the secret should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Create secret foo (from a file on the control machine)
+ community.docker.docker_secret:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+ # file directly after reading it prevents this to happen.
+ data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Create secret foo (from a file on the target machine)
+ community.docker.docker_secret:
+ name: foo
+ data_src: /path/to/secret/file
+ state: present
+
+- name: Change the secret data
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the secret
+ two: '2'
+ state: present
+
+- name: No change
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing secret
+ state: present
+
+- name: Update an existing label
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the secret
+ one: '1'
+ state: present
+
+- name: Force the removal/creation of the secret
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ force: true
+ state: present
+
+- name: Remove secret foo
+ community.docker.docker_secret:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+secret_id:
+ description:
+ - The ID assigned by Docker to the secret object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+secret_name:
+ description:
+ - The name of the created secret object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'awesome_secret'
+ version_added: 2.2.0
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ compare_generic,
+)
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+
+
+class SecretManager(DockerBaseClass):
+    # Creates/removes Docker swarm secrets; idempotency is implemented via the
+    # 'ansible_key' label (a hash of the data) on managed secrets.
+
+    def __init__(self, client, results):
+
+        super(SecretManager, self).__init__()
+
+        self.client = client
+        self.results = results
+        self.check_mode = self.client.check_mode
+
+        parameters = self.client.module.params
+        self.name = parameters.get('name')
+        self.state = parameters.get('state')
+        self.data = parameters.get('data')
+        if self.data is not None:
+            if parameters.get('data_is_b64'):
+                self.data = base64.b64decode(self.data)
+            else:
+                self.data = to_bytes(self.data)
+        # NOTE(review): data_src overwrites any 'data' value read above —
+        # presumably the two are mutually exclusive in the argument spec;
+        # confirm against the module's main().
+        data_src = parameters.get('data_src')
+        if data_src is not None:
+            try:
+                with open(data_src, 'rb') as f:
+                    self.data = f.read()
+            except Exception as exc:
+                self.client.fail('Error while reading {src}: {error}'.format(src=data_src, error=to_native(exc)))
+        self.labels = parameters.get('labels')
+        self.force = parameters.get('force')
+        self.rolling_versions = parameters.get('rolling_versions')
+        self.versions_to_keep = parameters.get('versions_to_keep')
+
+        if self.rolling_versions:
+            self.version = 0
+        # data_key: hash of the secret data, computed lazily in __call__.
+        self.data_key = None
+        # secrets: matching existing secrets, populated by get_secret().
+        self.secrets = []
+
+    def __call__(self):
+        # Entry point: discover existing secrets, then apply the desired state.
+        self.get_secret()
+        if self.state == 'present':
+            # Hash of the data is stored as the 'ansible_key' label and used
+            # for idempotency, since secret data cannot be read back.
+            self.data_key = hashlib.sha224(self.data).hexdigest()
+            self.present()
+            self.remove_old_versions()
+        elif self.state == 'absent':
+            self.absent()
+
+    def get_version(self, secret):
+        # Extract the rolling-version number from the 'ansible_version' label;
+        # returns 0 for unlabeled or malformed entries (sorts them first).
+        try:
+            return int(secret.get('Spec', {}).get('Labels', {}).get('ansible_version', 0))
+        except ValueError:
+            return 0
+
+    def remove_old_versions(self):
+        # Trim old rolling versions down to versions_to_keep (minimum 1, so
+        # the current version always survives); -1 disables trimming.
+        if not self.rolling_versions or self.versions_to_keep < 0:
+            return
+        if not self.check_mode:
+            # self.secrets is sorted oldest-first, so pop from the front.
+            while len(self.secrets) > max(self.versions_to_keep, 1):
+                self.remove_secret(self.secrets.pop(0))
+
+ def get_secret(self):
+ ''' Find an existing secret. '''
+ try:
+ secrets = self.client.secrets(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
+
+ if self.rolling_versions:
+ self.secrets = [
+ secret
+ for secret in secrets
+ if secret['Spec']['Name'].startswith('{name}_v'.format(name=self.name))
+ ]
+ self.secrets.sort(key=self.get_version)
+ else:
+ self.secrets = [
+ secret for secret in secrets if secret['Spec']['Name'] == self.name
+ ]
+
+ def create_secret(self):
+ ''' Create a new secret '''
+ secret_id = None
+ # We can't see the data after creation, so adding a label we can use for idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.rolling_versions:
+ self.version += 1
+ labels['ansible_version'] = str(self.version)
+ self.name = '{name}_v{version}'.format(name=self.name, version=self.version)
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ secret_id = self.client.create_secret(self.name, self.data, labels=labels)
+ self.secrets += self.client.secrets(filters={'id': secret_id})
+ except APIError as exc:
+ self.client.fail("Error creating secret: %s" % to_native(exc))
+
+ if isinstance(secret_id, dict):
+ secret_id = secret_id['ID']
+
+ return secret_id
+
+ def remove_secret(self, secret):
+ try:
+ if not self.check_mode:
+ self.client.remove_secret(secret['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing secret %s: %s" % (secret['Spec']['Name'], to_native(exc)))
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the secret '''
+ if self.secrets:
+ secret = self.secrets[-1]
+ self.results['secret_id'] = secret['ID']
+ self.results['secret_name'] = secret['Spec']['Name']
+ data_changed = False
+ attrs = secret.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ else:
+ if not self.force:
+ self.client.module.warn("'ansible_key' label not found. Secret will not be changed unless the force parameter is set to 'true'")
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if self.rolling_versions:
+ self.version = self.get_version(secret)
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the secret
+ if not self.rolling_versions:
+ self.absent()
+ secret_id = self.create_secret()
+ self.results['changed'] = True
+ self.results['secret_id'] = secret_id
+ self.results['secret_name'] = self.name
+ else:
+ self.results['changed'] = True
+ self.results['secret_id'] = self.create_secret()
+ self.results['secret_name'] = self.name
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the secret '''
+ if self.secrets:
+ for secret in self.secrets:
+ self.remove_secret(secret)
+ self.results['changed'] = True
+
+
+def main():
+    # Module entry point: declare arguments, build the Docker client, and
+    # delegate all work to SecretManager.
+    argument_spec = dict(
+        name=dict(type='str', required=True),
+        state=dict(type='str', default='present', choices=['absent', 'present']),
+        data=dict(type='str', no_log=True),
+        data_is_b64=dict(type='bool', default=False),
+        data_src=dict(type='path'),
+        labels=dict(type='dict'),
+        force=dict(type='bool', default=False),
+        rolling_versions=dict(type='bool', default=False),
+        versions_to_keep=dict(type='int', default=5),
+    )
+
+    # state=present requires at least one of data/data_src (the trailing
+    # True makes the requirement "any of" rather than "all of").
+    required_if = [
+        ('state', 'present', ['data', 'data_src'], True),
+    ]
+
+    mutually_exclusive = [
+        ('data', 'data_src'),
+    ]
+
+    client = AnsibleDockerClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=required_if,
+        mutually_exclusive=mutually_exclusive,
+        min_docker_version='2.1.0',
+    )
+
+    try:
+        results = dict(
+            changed=False,
+            secret_id='',
+            secret_name=''
+        )
+
+        # Instantiate and immediately invoke (__call__) the manager.
+        SecretManager(client, results)()
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail(
+            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+            exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_stack.py b/ansible_collections/community/docker/plugins/modules/docker_stack.py
new file mode 100644
index 00000000..98f4c3ad
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_stack.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_stack
+author: "Dario Zanzico (@dariko)"
+short_description: docker stack module
+description:
+ - Manage docker stacks using the C(docker stack) command
+ on the target node (see examples).
+extends_documentation_fragment:
+ - community.docker.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Stack name
+ type: str
+ required: true
+ state:
+ description:
+ - Service state.
+ type: str
+ default: "present"
+ choices:
+ - present
+ - absent
+ compose:
+ description:
+ - List of compose definitions. Any element may be a string
+ referring to the path of the compose file on the target host
+ or the YAML contents of a compose file nested as dictionary.
+ type: list
+ elements: raw
+ default: []
+ prune:
+ description:
+ - If true will add the C(--prune) option to the C(docker stack deploy) command.
+ This will have docker remove the services not present in the
+ current stack definition.
+ type: bool
+ default: false
+ with_registry_auth:
+ description:
+ - If true will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
+ This will have docker send registry authentication details to Swarm agents.
+ type: bool
+ default: false
+ resolve_image:
+ description:
+ - If set will add the C(--resolve-image) option to the C(docker stack deploy) command.
+ This will have docker query the registry to resolve image digest and
+ supported platforms. If not set, docker use "always" by default.
+ type: str
+ choices: ["always", "changed", "never"]
+ absent_retries:
+ description:
+ - If C(>0) and I(state) is C(absent) the module will retry up to
+ I(absent_retries) times to delete the stack until all the
+ resources have been effectively deleted.
+ If the last try still reports the stack as not completely
+ removed the module will fail.
+ type: int
+ default: 0
+ absent_retries_interval:
+ description:
+ - Interval in seconds between consecutive I(absent_retries).
+ type: int
+ default: 1
+
+requirements:
+ - jsondiff
+ - pyyaml
+'''
+
+RETURN = '''
+stack_spec_diff:
+ description: |
+ dictionary containing the differences between the 'Spec' field
+ of the stack services before and after applying the new stack
+ definition.
+ sample: >
+ "stack_spec_diff":
+ {'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
+ returned: on change
+ type: dict
+'''
+
+EXAMPLES = '''
+ - name: Deploy stack from a compose file
+ community.docker.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+
+ - name: Deploy stack from base compose file and override the web service
+ community.docker.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+ - version: '3'
+ services:
+ web:
+ image: nginx:latest
+ environment:
+ ENVVAR: envvar
+
+ - name: Remove stack
+ community.docker.docker_stack:
+ name: mystack
+ state: absent
+'''
+
+
+import json
+import tempfile
+from ansible.module_utils.six import string_types
+from time import sleep
+
+try:
+ from jsondiff import diff as json_diff
+ HAS_JSONDIFF = True
+except ImportError:
+ HAS_JSONDIFF = False
+
+try:
+ from yaml import dump as yaml_dump
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, os
+
+
+def docker_stack_services(module, stack_name):
+    # Return the list of service names in stack_name by shelling out to
+    # 'docker stack services'.
+    docker_bin = module.get_bin_path('docker', required=True)
+    rc, out, err = module.run_command([docker_bin,
+                                       "stack",
+                                       "services",
+                                       stack_name,
+                                       "--format",
+                                       "{{.Name}}"])
+    # Docker reports an unknown/empty stack on stderr rather than via rc.
+    if err == "Nothing found in stack: %s\n" % stack_name:
+        return []
+    # NOTE(review): when out is empty this returns [''], not [] — callers
+    # seem to rely on the stderr check above to avoid that case; confirm.
+    return out.strip().split('\n')
+
+
+def docker_service_inspect(module, service_name):
+    # Return the 'Spec' dict of one service via 'docker service inspect',
+    # or None when the command fails.
+    docker_bin = module.get_bin_path('docker', required=True)
+    rc, out, err = module.run_command([docker_bin,
+                                       "service",
+                                       "inspect",
+                                       service_name])
+    if rc != 0:
+        return None
+    else:
+        # 'docker service inspect' prints a JSON array; take the first entry.
+        ret = json.loads(out)[0]['Spec']
+        return ret
+
+
+def docker_stack_deploy(module, stack_name, compose_files):
+    # Build and run the 'docker stack deploy' command line from the module
+    # parameters. Returns the (rc, stdout, stderr) tuple from run_command.
+    docker_bin = module.get_bin_path('docker', required=True)
+    command = [docker_bin, "stack", "deploy"]
+    if module.params["prune"]:
+        command += ["--prune"]
+    if module.params["with_registry_auth"]:
+        command += ["--with-registry-auth"]
+    if module.params["resolve_image"]:
+        command += ["--resolve-image",
+                    module.params["resolve_image"]]
+    # Each compose definition becomes one --compose-file argument.
+    for compose_file in compose_files:
+        command += ["--compose-file",
+                    compose_file]
+    command += [stack_name]
+    return module.run_command(command)
+
+
+def docker_stack_inspect(module, stack_name):
+    # Map each service name of the stack to its inspected 'Spec' dict.
+    ret = {}
+    for service_name in docker_stack_services(module, stack_name):
+        ret[service_name] = docker_service_inspect(module, service_name)
+    return ret
+
+
+def docker_stack_rm(module, stack_name, retries, interval):
+    # Remove the stack, retrying up to 'retries' additional times (sleeping
+    # 'interval' seconds between attempts) until docker reports that nothing
+    # is left in the stack. Returns the last (rc, stdout, stderr).
+    docker_bin = module.get_bin_path('docker', required=True)
+    command = [docker_bin, "stack", "rm", stack_name]
+
+    rc, out, err = module.run_command(command)
+
+    # Completion is signalled on stderr, not through the exit code.
+    while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
+        sleep(interval)
+        retries = retries - 1
+        rc, out, err = module.run_command(command)
+    return rc, out, err
+
+
+def main():
+    # Entry point of the docker_stack module: deploy (state=present) or
+    # remove (state=absent) a stack via the docker CLI.
+    module = AnsibleModule(
+        argument_spec={
+            'name': dict(type='str', required=True),
+            'compose': dict(type='list', elements='raw', default=[]),
+            'prune': dict(type='bool', default=False),
+            'with_registry_auth': dict(type='bool', default=False),
+            'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
+            'state': dict(type='str', default='present', choices=['present', 'absent']),
+            'absent_retries': dict(type='int', default=0),
+            'absent_retries_interval': dict(type='int', default=1)
+        },
+        supports_check_mode=False
+    )
+
+    # Both libraries are soft imports; fail with install hints if missing.
+    if not HAS_JSONDIFF:
+        return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
+
+    if not HAS_YAML:
+        return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
+
+    state = module.params['state']
+    compose = module.params['compose']
+    name = module.params['name']
+    absent_retries = module.params['absent_retries']
+    absent_retries_interval = module.params['absent_retries_interval']
+
+    if state == 'present':
+        if not compose:
+            module.fail_json(msg=("compose parameter must be a list "
+                                  "containing at least one element"))
+
+        # Materialize dict compose definitions into temporary YAML files so
+        # everything can be passed to docker via --compose-file.
+        compose_files = []
+        for i, compose_def in enumerate(compose):
+            if isinstance(compose_def, dict):
+                compose_file_fd, compose_file = tempfile.mkstemp()
+                module.add_cleanup_file(compose_file)
+                with os.fdopen(compose_file_fd, 'w') as stack_file:
+                    compose_files.append(compose_file)
+                    stack_file.write(yaml_dump(compose_def))
+            elif isinstance(compose_def, string_types):
+                # A string is taken as a path on the target host.
+                compose_files.append(compose_def)
+            else:
+                module.fail_json(msg="compose element '%s' must be a string or a dictionary" % compose_def)
+
+        # Snapshot the service specs before and after the deploy so we can
+        # report a diff and derive the 'changed' status.
+        before_stack_services = docker_stack_inspect(module, name)
+
+        rc, out, err = docker_stack_deploy(module, name, compose_files)
+
+        after_stack_services = docker_stack_inspect(module, name)
+
+        if rc != 0:
+            module.fail_json(msg="docker stack up deploy command failed",
+                             rc=rc,
+                             stdout=out, stderr=err)
+
+        before_after_differences = json_diff(before_stack_services,
+                                             after_stack_services)
+        # Drop bookkeeping fields that change on every deploy, and prune
+        # entries that become empty as a result.
+        for k in before_after_differences.keys():
+            if isinstance(before_after_differences[k], dict):
+                before_after_differences[k].pop('UpdatedAt', None)
+                before_after_differences[k].pop('Version', None)
+                if not list(before_after_differences[k].keys()):
+                    before_after_differences.pop(k)
+
+        if not before_after_differences:
+            module.exit_json(
+                changed=False,
+                rc=rc,
+                stdout=out,
+                stderr=err)
+        else:
+            module.exit_json(
+                changed=True,
+                rc=rc,
+                stdout=out,
+                stderr=err,
+                stack_spec_diff=json_diff(before_stack_services,
+                                          after_stack_services,
+                                          dump=True))
+
+    else:
+        # state == 'absent': remove the stack only when it still has services.
+        if docker_stack_services(module, name):
+            rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
+            if rc != 0:
+                module.fail_json(msg="'docker stack down' command failed",
+                                 rc=rc,
+                                 stdout=out, stderr=err)
+            else:
+                module.exit_json(changed=True,
+                                 msg=out, rc=rc,
+                                 stdout=out, stderr=err)
+        module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_stack_info.py b/ansible_collections/community/docker/plugins/modules/docker_stack_info.py
new file mode 100644
index 00000000..bf3bfbdb
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_stack_info.py
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information on a docker stack
+description:
+ - Retrieve information on docker stacks using the C(docker stack) command
+ on the target node (see examples).
+extends_documentation_fragment:
+ - community.docker.attributes
+ - community.docker.attributes.info_module
+'''
+
+RETURN = '''
+results:
+ description: |
+ List of dictionaries containing the list of stacks or tasks associated
+ to a stack name.
+ sample:
+ - {"name":"grafana","namespace":"default","orchestrator":"Kubernetes","services":"2"}
+ returned: always
+ type: list
+ elements: dict
+'''
+
+EXAMPLES = '''
+ - name: Shows stack info
+ community.docker.docker_stack_info:
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def docker_stack_list(module):
+    # List all stacks; the --format template yields one JSON object per
+    # stdout line. Returns (rc, stripped stdout, stripped stderr).
+    docker_bin = module.get_bin_path('docker', required=True)
+    rc, out, err = module.run_command(
+        [docker_bin, "stack", "ls", "--format={{json .}}"])
+
+    return rc, out.strip(), err.strip()
+
+
+def main():
+    # Entry point: gather 'docker stack ls' output and return it as a list
+    # of dicts under 'results'.
+    module = AnsibleModule(
+        argument_spec={
+        },
+        supports_check_mode=True
+    )
+
+    rc, out, err = docker_stack_list(module)
+
+    if rc != 0:
+        module.fail_json(msg="Error running docker stack. {0}".format(err),
+                         rc=rc, stdout=out, stderr=err)
+    else:
+        if out:
+            # One JSON document per output line.
+            ret = list(
+                json.loads(outitem)
+                for outitem in out.splitlines())
+
+        else:
+            ret = []
+
+        module.exit_json(changed=False,
+                         rc=rc,
+                         stdout=out,
+                         stderr=err,
+                         results=ret)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py b/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py
new file mode 100644
index 00000000..e3693bc5
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_task_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information of the tasks on a docker stack
+description:
+ - Retrieve information on docker stacks tasks using the C(docker stack) command
+ on the target node (see examples).
+extends_documentation_fragment:
+ - community.docker.attributes
+ - community.docker.attributes.info_module
+options:
+ name:
+ description:
+ - Stack name.
+ type: str
+ required: true
+'''
+
+RETURN = '''
+results:
+ description: |
+ List of dictionaries containing the list of tasks associated
+ to a stack name.
+ sample: >
+ [{"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""}]
+ returned: always
+ type: list
+ elements: dict
+'''
+
+EXAMPLES = '''
+ - name: Shows stack info
+ community.docker.docker_stack_task_info:
+ name: test_stack
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def docker_stack_task(module, stack_name):
+    # List the tasks of one stack; the --format template yields one JSON
+    # object per stdout line. Returns (rc, stripped stdout, stripped stderr).
+    docker_bin = module.get_bin_path('docker', required=True)
+    rc, out, err = module.run_command(
+        [docker_bin, "stack", "ps", stack_name, "--format={{json .}}"])
+
+    return rc, out.strip(), err.strip()
+
+
+def main():
+    # Entry point: list the tasks of one stack via 'docker stack ps' and
+    # return them as a list of dicts under 'results'.
+    module = AnsibleModule(
+        argument_spec={
+            'name': dict(type='str', required=True)
+        },
+        supports_check_mode=True
+    )
+
+    name = module.params['name']
+
+    rc, out, err = docker_stack_task(module, name)
+
+    if rc != 0:
+        module.fail_json(msg="Error running docker stack. {0}".format(err),
+                         rc=rc, stdout=out, stderr=err)
+    else:
+        if out:
+            # One JSON document per output line.
+            ret = list(
+                json.loads(outitem)
+                for outitem in out.splitlines())
+
+        else:
+            ret = []
+
+        module.exit_json(changed=False,
+                         rc=rc,
+                         stdout=out,
+                         stderr=err,
+                         results=ret)
+
+
+if __name__ == "__main__":
+    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm.py b/ansible_collections/community/docker/plugins/modules/docker_swarm.py
new file mode 100644
index 00000000..31306ecf
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_swarm.py
@@ -0,0 +1,725 @@
+#!/usr/bin/python
+
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm
+short_description: Manage Swarm cluster
+description:
+ - Create a new Swarm cluster.
+ - Add/Remove nodes or managers to an existing cluster.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ advertise_addr:
+ description:
+ - Externally reachable address advertised to other nodes.
+ - This can either be an address/port combination
+ in the form C(192.168.1.1:4567), or an interface followed by a
+ port number, like C(eth0:4567).
+ - If the port number is omitted,
+ the port number from the listen address is used.
+ - If I(advertise_addr) is not specified, it will be automatically
+ detected when possible.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default_addr_pool:
+ description:
+ - Default address pool in CIDR format.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: list
+ elements: str
+ subnet_size:
+ description:
+ - Default address pool subnet mask length.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: int
+ listen_addr:
+ description:
+ - Listen address used for inter-manager communication.
+ - This can either be an address/port combination in the form
+ C(192.168.1.1:4567), or an interface followed by a port number,
+ like C(eth0:4567).
+ - If the port number is omitted, the default swarm listening port
+ is used.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default: 0.0.0.0:2377
+ force:
+ description:
+ - Use with state C(present) to force creating a new Swarm, even if already part of one.
+ - Use with state C(absent) to Leave the swarm even if this node is a manager.
+ type: bool
+ default: false
+ state:
+ description:
+ - Set to C(present), to create/update a new cluster.
+ - Set to C(join), to join an existing cluster.
+ - Set to C(absent), to leave an existing cluster.
+ - Set to C(remove), to remove an absent node from the cluster.
+ Note that removing requires Docker SDK for Python >= 2.4.0.
+ type: str
+ default: present
+ choices:
+ - present
+ - join
+ - absent
+ - remove
+ node_id:
+ description:
+ - Swarm id of the node to remove.
+ - Used with I(state=remove).
+ type: str
+ join_token:
+ description:
+ - Swarm token used to join a swarm cluster.
+ - Used with I(state=join).
+ - If this value is specified, the corresponding value in the return values will be censored by Ansible.
+ This is a side-effect of this value not being logged.
+ type: str
+ remote_addrs:
+ description:
+ - Remote address of one or more manager nodes of an existing Swarm to connect to.
+ - Used with I(state=join).
+ type: list
+ elements: str
+ task_history_retention_limit:
+ description:
+ - Maximum number of tasks history stored.
+ - Docker default value is C(5).
+ type: int
+ snapshot_interval:
+ description:
+ - Number of logs entries between snapshot.
+ - Docker default value is C(10000).
+ type: int
+ keep_old_snapshots:
+ description:
+ - Number of snapshots to keep beyond the current snapshot.
+ - Docker default value is C(0).
+ type: int
+ log_entries_for_slow_followers:
+ description:
+ - Number of log entries to keep around to sync up slow followers after a snapshot is created.
+ type: int
+ heartbeat_tick:
+ description:
+ - Amount of ticks (in seconds) between each heartbeat.
+ - Docker default value is C(1s).
+ type: int
+ election_tick:
+ description:
+ - Amount of ticks (in seconds) needed without a leader to trigger a new election.
+ - Docker default value is C(10s).
+ type: int
+ dispatcher_heartbeat_period:
+ description:
+ - The delay for an agent to send a heartbeat to the dispatcher.
+ - Docker default value is C(5s).
+ type: int
+ node_cert_expiry:
+ description:
+ - Automatic expiry for nodes certificates.
+ - Docker default value is C(3months).
+ type: int
+ name:
+ description:
+ - The name of the swarm.
+ type: str
+ labels:
+ description:
+ - User-defined key/value metadata.
+ - Label operations in this module apply to the docker swarm cluster.
+ Use M(community.docker.docker_node) module to add/modify/remove swarm node labels.
+ - Requires API version >= 1.32.
+ type: dict
+ signing_ca_cert:
+ description:
+ - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a certificate, but the contents of the certificate.
+ - Requires API version >= 1.30.
+ type: str
+ signing_ca_key:
+ description:
+ - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a key, but the contents of the key.
+ - Requires API version >= 1.30.
+ type: str
+ ca_force_rotate:
+ description:
+ - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
+ if none have been specified.
+ - Docker default value is C(0).
+ - Requires API version >= 1.30.
+ type: int
+ autolock_managers:
+ description:
+ - If set, generate a key and use it to lock data stored on the managers.
+ - Docker default value is C(false).
+ - M(community.docker.docker_swarm_info) can be used to retrieve the unlock key.
+ type: bool
+ rotate_worker_token:
+ description: Rotate the worker join token.
+ type: bool
+ default: false
+ rotate_manager_token:
+ description: Rotate the manager join token.
+ type: bool
+ default: false
+ data_path_addr:
+ description:
+ - Address or interface to use for data path traffic.
+ - This can either be an address in the form C(192.168.1.1), or an interface,
+ like C(eth0).
+ - Only used when swarm is initialised or joined. Because of this it is not
+ considered for idempotency checking.
+ type: str
+ version_added: 2.5.0
+ data_path_port:
+ description:
+ - Port to use for data path traffic.
+ - This needs to be a port number like C(9789).
+ - Only used when swarm is initialised. Because of this it is not
+ considered for idempotency checking.
+ type: int
+ version_added: 3.1.0
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0"
+ - Docker API >= 1.25
+author:
+ - Thierry Bouvet (@tbouvet)
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+'''
+
+EXAMPLES = '''
+
+- name: Init a new swarm with default parameters
+ community.docker.docker_swarm:
+ state: present
+
+- name: Update swarm configuration
+ community.docker.docker_swarm:
+ state: present
+ election_tick: 5
+
+- name: Add nodes
+ community.docker.docker_swarm:
+ state: join
+ advertise_addr: 192.168.1.2
+ join_token: SWMTKN-1--xxxxx
+ remote_addrs: [ '192.168.1.1:2377' ]
+
+- name: Leave swarm for a node
+ community.docker.docker_swarm:
+ state: absent
+
+- name: Remove a swarm manager
+ community.docker.docker_swarm:
+ state: absent
+ force: true
+
+- name: Remove node from swarm
+ community.docker.docker_swarm:
+ state: remove
+ node_id: mynode
+
+- name: Init a new swarm with different data path interface
+ community.docker.docker_swarm:
+ state: present
+ advertise_addr: eth0
+ data_path_addr: ens10
+
+- name: Init a new swarm with a different data path port
+ community.docker.docker_swarm:
+ state: present
+ data_path_port: 9789
+'''
+
+RETURN = '''
+swarm_facts:
+ description: Informations about swarm.
+ returned: success
+ type: dict
+ contains:
+ JoinTokens:
+ description: Tokens to connect to the Swarm.
+ returned: success
+ type: dict
+ contains:
+ Worker:
+ description:
+ - Token to join the cluster as a new *worker* node.
+ - "B(Note:) if this value has been specified as I(join_token), the value here will not
+ be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). If you pass I(join_token),
+ make sure your playbook/role does not depend on this return value!"
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ Manager:
+ description:
+ - Token to join the cluster as a new *manager* node.
+ - "B(Note:) if this value has been specified as I(join_token), the value here will not
+ be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). If you pass I(join_token),
+ make sure your playbook/role does not depend on this return value!"
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ UnlockKey:
+ description: The swarm unlock-key if I(autolock_managers) is C(true).
+ returned: on success if I(autolock_managers) is C(true)
+ and swarm is initialised, or if I(autolock_managers) has changed.
+ type: str
+ example: SWMKEY-1-xxx
+
+actions:
+ description: Provides the actions done on the swarm.
+ returned: when action failed.
+ type: list
+ elements: str
+ example: ['This cluster is already a swarm cluster']
+
+'''
+
+import json
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ DockerBaseClass,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DifferenceTracker,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+from ansible.module_utils.common.text.converters import to_native
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self):
+ super(TaskParameters, self).__init__()
+
+ self.advertise_addr = None
+ self.listen_addr = None
+ self.remote_addrs = None
+ self.join_token = None
+ self.data_path_addr = None
+ self.data_path_port = None
+
+ # Spec
+ self.snapshot_interval = None
+ self.task_history_retention_limit = None
+ self.keep_old_snapshots = None
+ self.log_entries_for_slow_followers = None
+ self.heartbeat_tick = None
+ self.election_tick = None
+ self.dispatcher_heartbeat_period = None
+ self.node_cert_expiry = None
+ self.name = None
+ self.labels = None
+ self.log_driver = None
+ self.signing_ca_cert = None
+ self.signing_ca_key = None
+ self.ca_force_rotate = None
+ self.autolock_managers = None
+ self.rotate_worker_token = None
+ self.rotate_manager_token = None
+ self.default_addr_pool = None
+ self.subnet_size = None
+
+ @staticmethod
+ def from_ansible_params(client):
+ result = TaskParameters()
+ for key, value in client.module.params.items():
+ if key in result.__dict__:
+ setattr(result, key, value)
+
+ result.update_parameters(client)
+ return result
+
+ def update_from_swarm_info(self, swarm_info):
+ spec = swarm_info['Spec']
+
+ ca_config = spec.get('CAConfig') or dict()
+ if self.node_cert_expiry is None:
+ self.node_cert_expiry = ca_config.get('NodeCertExpiry')
+ if self.ca_force_rotate is None:
+ self.ca_force_rotate = ca_config.get('ForceRotate')
+
+ dispatcher = spec.get('Dispatcher') or dict()
+ if self.dispatcher_heartbeat_period is None:
+ self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
+
+ raft = spec.get('Raft') or dict()
+ if self.snapshot_interval is None:
+ self.snapshot_interval = raft.get('SnapshotInterval')
+ if self.keep_old_snapshots is None:
+ self.keep_old_snapshots = raft.get('KeepOldSnapshots')
+ if self.heartbeat_tick is None:
+ self.heartbeat_tick = raft.get('HeartbeatTick')
+ if self.log_entries_for_slow_followers is None:
+ self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
+ if self.election_tick is None:
+ self.election_tick = raft.get('ElectionTick')
+
+ orchestration = spec.get('Orchestration') or dict()
+ if self.task_history_retention_limit is None:
+ self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
+
+ encryption_config = spec.get('EncryptionConfig') or dict()
+ if self.autolock_managers is None:
+ self.autolock_managers = encryption_config.get('AutoLockManagers')
+
+ if self.name is None:
+ self.name = spec['Name']
+
+ if self.labels is None:
+ self.labels = spec.get('Labels') or {}
+
+ if 'LogDriver' in spec['TaskDefaults']:
+ self.log_driver = spec['TaskDefaults']['LogDriver']
+
+    def update_parameters(self, client):
+        # Build a fresh SwarmSpec from the currently set task parameters and
+        # store it on self.spec. Options that the installed Docker SDK /
+        # daemon API version does not support are silently omitted.
+        # Maps create_swarm_spec() keyword -> attribute name on self
+        # (currently 1:1, kept explicit for clarity).
+        assign = dict(
+            snapshot_interval='snapshot_interval',
+            task_history_retention_limit='task_history_retention_limit',
+            keep_old_snapshots='keep_old_snapshots',
+            log_entries_for_slow_followers='log_entries_for_slow_followers',
+            heartbeat_tick='heartbeat_tick',
+            election_tick='election_tick',
+            dispatcher_heartbeat_period='dispatcher_heartbeat_period',
+            node_cert_expiry='node_cert_expiry',
+            name='name',
+            labels='labels',
+            signing_ca_cert='signing_ca_cert',
+            signing_ca_key='signing_ca_key',
+            ca_force_rotate='ca_force_rotate',
+            autolock_managers='autolock_managers',
+            log_driver='log_driver',
+        )
+        params = dict()
+        for dest, source in assign.items():
+            # Skip options unsupported by this SDK/API combination.
+            if not client.option_minimal_versions[source]['supported']:
+                continue
+            value = getattr(self, source)
+            if value is not None:
+                params[dest] = value
+        self.spec = client.create_swarm_spec(**params)
+
+    def compare_to_active(self, other, client, differences):
+        # Record every swarm-spec parameter that differs between self (the
+        # desired state) and `other` (the currently active state) into
+        # `differences` (a DifferenceTracker), and return it.
+        for k in self.__dict__:
+            # Connection/one-shot options are not part of the swarm spec and
+            # must not be diffed against the active cluster state.
+            if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
+                     'rotate_worker_token', 'rotate_manager_token', 'spec',
+                     'default_addr_pool', 'subnet_size', 'data_path_addr',
+                     'data_path_port'):
+                continue
+            if not client.option_minimal_versions[k]['supported']:
+                continue
+            value = getattr(self, k)
+            # None means "not specified by the user": no difference recorded.
+            if value is None:
+                continue
+            other_value = getattr(other, k)
+            if value != other_value:
+                differences.add(k, parameter=value, active=other_value)
+        # Token rotation is always a change when requested.
+        if self.rotate_worker_token:
+            differences.add('rotate_worker_token', parameter=True, active=False)
+        if self.rotate_manager_token:
+            differences.add('rotate_manager_token', parameter=True, active=False)
+        return differences
+
+
+class SwarmManager(DockerBaseClass):
+    """Implement the docker_swarm module states: present, join, absent, remove.
+
+    Mutates the ``results`` dict passed to the constructor; the caller reads
+    it back after invoking the instance via ``__call__``.
+    """
+
+    def __init__(self, client, results):
+
+        super(SwarmManager, self).__init__()
+
+        self.client = client
+        self.results = results
+        self.check_mode = self.client.check_mode
+        self.swarm_info = {}
+
+        self.state = client.module.params['state']
+        self.force = client.module.params['force']
+        self.node_id = client.module.params['node_id']
+
+        self.differences = DifferenceTracker()
+        self.parameters = TaskParameters.from_ansible_params(client)
+
+        # Set to True by init_swarm() once a new cluster was actually created.
+        self.created = False
+
+    def __call__(self):
+        # Dispatch to the handler for the requested state.
+        choice_map = {
+            "present": self.init_swarm,
+            "join": self.join,
+            "absent": self.leave,
+            "remove": self.remove,
+        }
+
+        choice_map.get(self.state)()
+
+        # Attach a before/after diff when running with --diff or debug enabled.
+        if self.client.module._diff or self.parameters.debug:
+            diff = dict()
+            diff['before'], diff['after'] = self.differences.get_before_after()
+            self.results['diff'] = diff
+
+    def inspect_swarm(self):
+        # Refresh self.swarm_info from the daemon. API errors are swallowed:
+        # the previous (possibly empty) info is kept and no failure is raised.
+        try:
+            data = self.client.inspect_swarm()
+            # Round-trip through JSON to obtain a plain, serializable dict.
+            json_str = json.dumps(data, ensure_ascii=False)
+            self.swarm_info = json.loads(json_str)
+
+            self.results['changed'] = False
+            self.results['swarm_facts'] = self.swarm_info
+
+            unlock_key = self.get_unlock_key()
+            self.swarm_info.update(unlock_key)
+        except APIError:
+            return
+
+    def get_unlock_key(self):
+        # Only query the daemon for the unlock key when autolock is relevant
+        # (see has_swarm_lock_changed); otherwise, and on errors, report None.
+        default = {'UnlockKey': None}
+        if not self.has_swarm_lock_changed():
+            return default
+        try:
+            return self.client.get_unlock_key() or default
+        except APIError:
+            return default
+
+    def has_swarm_lock_changed(self):
+        # True when autolock_managers is requested and either the swarm was
+        # just created or the autolock setting differs from the active one.
+        return self.parameters.autolock_managers and (
+            self.created or self.differences.has_difference_for('autolock_managers')
+        )
+
+    def init_swarm(self):
+        # state=present: update an existing swarm (unless force), or create one.
+        if not self.force and self.client.check_if_swarm_manager():
+            self.__update_swarm()
+            return
+
+        if not self.check_mode:
+            init_arguments = {
+                'advertise_addr': self.parameters.advertise_addr,
+                'listen_addr': self.parameters.listen_addr,
+                'data_path_addr': self.parameters.data_path_addr,
+                'force_new_cluster': self.force,
+                'swarm_spec': self.parameters.spec,
+            }
+            # Only pass optional arguments the user actually set; some require
+            # newer SDK/API versions (enforced via option_minimal_versions).
+            if self.parameters.default_addr_pool is not None:
+                init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
+            if self.parameters.subnet_size is not None:
+                init_arguments['subnet_size'] = self.parameters.subnet_size
+            if self.parameters.data_path_port is not None:
+                init_arguments['data_path_port'] = self.parameters.data_path_port
+            try:
+                self.client.init_swarm(**init_arguments)
+            except APIError as exc:
+                self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
+
+        if not self.client.check_if_swarm_manager():
+            # In check mode the swarm was never created, so not being a
+            # manager afterwards is expected and not an error.
+            if not self.check_mode:
+                self.client.fail("Swarm not created or other error!")
+
+        self.created = True
+        self.inspect_swarm()
+        self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
+        self.differences.add('state', parameter='present', active='absent')
+        self.results['changed'] = True
+        self.results['swarm_facts'] = {
+            'JoinTokens': self.swarm_info.get('JoinTokens'),
+            'UnlockKey': self.swarm_info.get('UnlockKey')
+        }
+
+    def __update_swarm(self):
+        try:
+            self.inspect_swarm()
+            version = self.swarm_info['Version']['Index']
+            self.parameters.update_from_swarm_info(self.swarm_info)
+            # Build a second TaskParameters representing the active state and
+            # diff the desired parameters against it.
+            old_parameters = TaskParameters()
+            old_parameters.update_from_swarm_info(self.swarm_info)
+            self.parameters.compare_to_active(old_parameters, self.client, self.differences)
+            if self.differences.empty:
+                self.results['actions'].append("No modification")
+                self.results['changed'] = False
+                return
+            # Build the update spec from the raw module parameters (not the
+            # merged self.parameters) so only user-specified values are sent.
+            update_parameters = TaskParameters.from_ansible_params(self.client)
+            update_parameters.update_parameters(self.client)
+            if not self.check_mode:
+                self.client.update_swarm(
+                    version=version, swarm_spec=update_parameters.spec,
+                    rotate_worker_token=self.parameters.rotate_worker_token,
+                    rotate_manager_token=self.parameters.rotate_manager_token)
+        except APIError as exc:
+            self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
+            return
+
+        self.inspect_swarm()
+        self.results['actions'].append("Swarm cluster updated")
+        self.results['changed'] = True
+
+    def join(self):
+        # state=join: idempotent — a node already in a swarm is left alone.
+        if self.client.check_if_swarm_node():
+            self.results['actions'].append("This node is already part of a swarm.")
+            return
+        if not self.check_mode:
+            try:
+                self.client.join_swarm(
+                    remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
+                    listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr,
+                    data_path_addr=self.parameters.data_path_addr)
+            except APIError as exc:
+                self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
+        self.results['actions'].append("New node is added to swarm cluster")
+        self.differences.add('joined', parameter=True, active=False)
+        self.results['changed'] = True
+
+    def leave(self):
+        # state=absent: idempotent — a node not in a swarm is left alone.
+        if not self.client.check_if_swarm_node():
+            self.results['actions'].append("This node is not part of a swarm.")
+            return
+        if not self.check_mode:
+            try:
+                self.client.leave_swarm(force=self.force)
+            except APIError as exc:
+                self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
+        self.results['actions'].append("Node has left the swarm cluster")
+        self.differences.add('joined', parameter='absent', active='present')
+        self.results['changed'] = True
+
+    def remove(self):
+        # state=remove: remove node_id from the cluster; must run on a manager.
+        if not self.client.check_if_swarm_manager():
+            self.client.fail("This node is not a manager.")
+
+        # Poll up to 5 times for the node to be down before removing it.
+        try:
+            status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
+        except APIError:
+            return
+
+        if not status_down:
+            self.client.fail("Can not remove the node. The status node is ready and not down.")
+
+        if not self.check_mode:
+            try:
+                self.client.remove_node(node_id=self.node_id, force=self.force)
+            except APIError as exc:
+                self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
+        self.results['actions'].append("Node is removed from swarm cluster.")
+        self.differences.add('joined', parameter=False, active=True)
+        self.results['changed'] = True
+
+
+def _detect_remove_operation(client):
+    # Usage detector for option_minimal_versions: the 'remove' state needs a
+    # newer Docker SDK for Python than the other states (see main()).
+    return client.module.params['state'] == 'remove'
+
+
+def main():
+    # Module entry point: declare the argument spec, create the swarm client,
+    # run SwarmManager, and exit with the accumulated results.
+    argument_spec = dict(
+        advertise_addr=dict(type='str'),
+        data_path_addr=dict(type='str'),
+        data_path_port=dict(type='int'),
+        state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove']),
+        force=dict(type='bool', default=False),
+        listen_addr=dict(type='str', default='0.0.0.0:2377'),
+        remote_addrs=dict(type='list', elements='str'),
+        join_token=dict(type='str', no_log=True),
+        snapshot_interval=dict(type='int'),
+        task_history_retention_limit=dict(type='int'),
+        keep_old_snapshots=dict(type='int'),
+        log_entries_for_slow_followers=dict(type='int'),
+        heartbeat_tick=dict(type='int'),
+        election_tick=dict(type='int'),
+        dispatcher_heartbeat_period=dict(type='int'),
+        node_cert_expiry=dict(type='int'),
+        name=dict(type='str'),
+        labels=dict(type='dict'),
+        signing_ca_cert=dict(type='str'),
+        signing_ca_key=dict(type='str', no_log=True),
+        ca_force_rotate=dict(type='int'),
+        autolock_managers=dict(type='bool'),
+        node_id=dict(type='str'),
+        rotate_worker_token=dict(type='bool', default=False),
+        rotate_manager_token=dict(type='bool', default=False),
+        default_addr_pool=dict(type='list', elements='str'),
+        subnet_size=dict(type='int'),
+    )
+
+    required_if = [
+        ('state', 'join', ['remote_addrs', 'join_token']),
+        ('state', 'remove', ['node_id'])
+    ]
+
+    # Minimum Docker SDK / daemon API versions per option; unsupported options
+    # are skipped or rejected by the client depending on usage.
+    option_minimal_versions = dict(
+        labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
+        signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+        signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+        ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+        autolock_managers=dict(docker_py_version='2.6.0'),
+        log_driver=dict(docker_py_version='2.6.0'),
+        remove_operation=dict(
+            docker_py_version='2.4.0',
+            detect_usage=_detect_remove_operation,
+            usage_msg='remove swarm nodes'
+        ),
+        default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+        subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+        data_path_addr=dict(docker_py_version='4.0.0', docker_api_version='1.30'),
+        data_path_port=dict(docker_py_version='6.0.0', docker_api_version='1.40'),
+    )
+
+    client = AnsibleDockerSwarmClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        required_if=required_if,
+        min_docker_version='1.10.0',
+        option_minimal_versions=option_minimal_versions,
+    )
+
+    try:
+        results = dict(
+            changed=False,
+            result='',
+            actions=[]
+        )
+
+        # SwarmManager mutates `results` in place via __call__.
+        SwarmManager(client, results)()
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail(
+            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+            exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py b/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py
new file mode 100644
index 00000000..97025a65
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_info
+
+short_description: Retrieves facts about Docker Swarm cluster.
+
+description:
+ - Retrieves facts about a Docker Swarm.
+ - Returns lists of swarm objects names for the services - nodes, services, tasks.
+ - The output differs depending on API version available on docker host.
+ - Must be run on Swarm Manager node; otherwise module fails with error message.
+    It does return boolean flags on both error and success which indicate whether
+ the docker daemon can be communicated with, whether it is in Swarm mode, and
+ whether it is a Swarm Manager node.
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ nodes:
+ description:
+ - Whether to list swarm nodes.
+ type: bool
+ default: false
+ nodes_filters:
+ description:
+ - A dictionary of filter values used for selecting nodes to list.
+ - "For example, C(name: mynode)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ services:
+ description:
+ - Whether to list swarm services.
+ type: bool
+ default: false
+ services_filters:
+ description:
+ - A dictionary of filter values used for selecting services to list.
+ - "For example, C(name: myservice)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ tasks:
+ description:
+ - Whether to list containers.
+ type: bool
+ default: false
+ tasks_filters:
+ description:
+ - A dictionary of filter values used for selecting tasks to list.
+ - "For example, C(node: mynode-1)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering)
+ for more information on possible filters.
+ type: dict
+ unlock_key:
+ description:
+ - Whether to retrieve the swarm unlock key.
+ type: bool
+ default: false
+ verbose_output:
+ description:
+ - When set to C(true) and I(nodes), I(services) or I(tasks) is set to C(true), then the module output will
+ contain verbose information about objects matching the full output of API method.
+ - For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
+ - The verbose output in this module contains only subset of information returned by I(_info) module
+ for each type of the objects.
+ type: bool
+ default: false
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0"
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get info on Docker Swarm
+ community.docker.docker_swarm_info:
+ ignore_errors: true
+ register: result
+
+- name: Inform about basic flags
+ ansible.builtin.debug:
+ msg: |
+ Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
+ Docker in Swarm mode: {{ result.docker_swarm_active }}
+ This is a Manager node: {{ result.docker_swarm_manager }}
+
+
+- name: Get info on Docker Swarm and list of registered nodes
+ community.docker.docker_swarm_info:
+ nodes: true
+ register: result
+
+- name: Get info on Docker Swarm and extended list of registered nodes
+ community.docker.docker_swarm_info:
+ nodes: true
+ verbose_output: true
+ register: result
+
+- name: Get info on Docker Swarm and filtered list of registered nodes
+ community.docker.docker_swarm_info:
+ nodes: true
+ nodes_filters:
+ name: mynode
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_facts
+
+- name: Get the swarm unlock key
+ community.docker.docker_swarm_info:
+ unlock_key: true
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_unlock_key
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+docker_swarm_active:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ and the docker daemon is in Swarm mode.
+ returned: both on success and on error
+ type: bool
+docker_swarm_manager:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ the docker daemon is in Swarm mode, and the current node is
+ a manager node.
+ - Only if this one is C(true), the module will not fail.
+ returned: both on success and on error
+ type: bool
+swarm_facts:
+ description:
+ - Facts representing the basic state of the docker Swarm cluster.
+ - Contains tokens to connect to the Swarm
+ returned: always
+ type: dict
+swarm_unlock_key:
+ description:
+ - Contains the key needed to unlock the swarm.
+ returned: When I(unlock_key) is C(true).
+ type: str
+nodes:
+ description:
+    - List of dict objects containing the basic information about each node.
+      Keys match the C(docker node ls) output unless I(verbose_output=true).
+ See description for I(verbose_output).
+ returned: When I(nodes) is C(true)
+ type: list
+ elements: dict
+services:
+ description:
+    - List of dict objects containing the basic information about each service.
+      Keys match the C(docker service ls) output unless I(verbose_output=true).
+ See description for I(verbose_output).
+ returned: When I(services) is C(true)
+ type: list
+ elements: dict
+tasks:
+ description:
+    - List of dict objects containing the basic information about each task.
+      Keys match the C(docker service ps) output unless I(verbose_output=true).
+ See description for I(verbose_output).
+ returned: When I(tasks) is C(true)
+ type: list
+ elements: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker_common
+ pass
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+from ansible_collections.community.docker.plugins.module_utils.common import RequestException
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ clean_dict_booleans_for_docker_api,
+)
+
+
<br>
+class DockerSwarmManager(DockerBaseClass):
+    """Collect swarm facts and optional node/service/task lists into `results`.
+
+    All work happens in __init__; the instance itself is not used afterwards.
+    Fails the module early when not running on a swarm manager node.
+    """
+
+    def __init__(self, client, results):
+
+        super(DockerSwarmManager, self).__init__()
+
+        self.client = client
+        self.results = results
+        self.verbose_output = self.client.module.params['verbose_output']
+
+        # Object kinds that can be listed via the nodes/services/tasks options.
+        listed_objects = ['tasks', 'services', 'nodes']
+
+        self.client.fail_task_if_not_swarm_manager()
+
+        self.results['swarm_facts'] = self.get_docker_swarm_facts()
+
+        for docker_object in listed_objects:
+            if self.client.module.params[docker_object]:
+                returned_name = docker_object
+                # Each listing option has a matching "<name>_filters" option.
+                filter_name = docker_object + "_filters"
+                filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+                self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+        if self.client.module.params['unlock_key']:
+            self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key()
+
+    def get_docker_swarm_facts(self):
+        try:
+            return self.client.inspect_swarm()
+        except APIError as exc:
+            self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))
+
+    def get_docker_items_list(self, docker_object=None, filters=None):
+        # List the requested object kind; returns raw API output when
+        # verbose_output is set, else a condensed per-item dict list.
+        items = None
+        items_list = []
+
+        try:
+            if docker_object == 'nodes':
+                items = self.client.nodes(filters=filters)
+            elif docker_object == 'tasks':
+                items = self.client.tasks(filters=filters)
+            elif docker_object == 'services':
+                items = self.client.services(filters=filters)
+        except APIError as exc:
+            self.client.fail("Error inspecting docker swarm for object '%s': %s" %
+                             (docker_object, to_native(exc)))
+
+        if self.verbose_output:
+            return items
+
+        for item in items:
+            item_record = dict()
+
+            if docker_object == 'nodes':
+                item_record = self.get_essential_facts_nodes(item)
+            elif docker_object == 'tasks':
+                item_record = self.get_essential_facts_tasks(item)
+            elif docker_object == 'services':
+                item_record = self.get_essential_facts_services(item)
+                # Global services have no replica count in their spec; report
+                # the number of listed services instead (see services docs).
+                if item_record['Mode'] == 'Global':
+                    item_record['Replicas'] = len(items)
+            items_list.append(item_record)
+
+        return items_list
+
+    @staticmethod
+    def get_essential_facts_nodes(item):
+        # Condense a node inspect dict to roughly `docker node ls` columns.
+        object_essentials = dict()
+
+        object_essentials['ID'] = item.get('ID')
+        object_essentials['Hostname'] = item['Description']['Hostname']
+        object_essentials['Status'] = item['Status']['State']
+        object_essentials['Availability'] = item['Spec']['Availability']
+        if 'ManagerStatus' in item:
+            object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability']
+            # The leader is reported as "Leader" instead of its reachability.
+            if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True:
+                object_essentials['ManagerStatus'] = "Leader"
+        else:
+            object_essentials['ManagerStatus'] = None
+        object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion']
+
+        return object_essentials
+
+    def get_essential_facts_tasks(self, item):
+        # Condense a task inspect dict to roughly `docker service ps` columns.
+        object_essentials = dict()
+
+        object_essentials['ID'] = item['ID']
+        # Returning container ID to not trigger another connection to host
+        # Container ID is sufficient to get extended info in other tasks
+        object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID']
+        object_essentials['Image'] = item['Spec']['ContainerSpec']['Image']
+        object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID'])
+        object_essentials['DesiredState'] = item['DesiredState']
+        object_essentials['CurrentState'] = item['Status']['State']
+        if 'Err' in item['Status']:
+            object_essentials['Error'] = item['Status']['Err']
+        else:
+            object_essentials['Error'] = None
+
+        return object_essentials
+
+    @staticmethod
+    def get_essential_facts_services(item):
+        # Condense a service inspect dict to roughly `docker service ls` columns.
+        object_essentials = dict()
+
+        object_essentials['ID'] = item['ID']
+        object_essentials['Name'] = item['Spec']['Name']
+        if 'Replicated' in item['Spec']['Mode']:
+            object_essentials['Mode'] = "Replicated"
+            object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas']
+        elif 'Global' in item['Spec']['Mode']:
+            object_essentials['Mode'] = "Global"
+            # Number of replicas have to be updated in calling method or may be left as None
+            object_essentials['Replicas'] = None
+        object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image']
+        if 'Ports' in item['Spec']['EndpointSpec']:
+            object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports']
+        else:
+            object_essentials['Ports'] = []
+
+        return object_essentials
+
+    def get_docker_swarm_unlock_key(self):
+        # Normalize the SDK response to the bare key string, or None.
+        unlock_key = self.client.get_unlock_key() or {}
+        return unlock_key.get('UnlockKey') or None
+
+
+def main():
+    # Module entry point: declare arguments, create the swarm client with
+    # failure flags pre-populated, gather facts, and exit.
+    argument_spec = dict(
+        nodes=dict(type='bool', default=False),
+        nodes_filters=dict(type='dict'),
+        tasks=dict(type='bool', default=False),
+        tasks_filters=dict(type='dict'),
+        services=dict(type='bool', default=False),
+        services_filters=dict(type='dict'),
+        unlock_key=dict(type='bool', default=False),
+        verbose_output=dict(type='bool', default=False),
+    )
+    option_minimal_versions = dict(
+        unlock_key=dict(docker_py_version='2.7.0'),
+    )
+
+    client = AnsibleDockerSwarmClient(
+        argument_spec=argument_spec,
+        supports_check_mode=True,
+        min_docker_version='1.10.0',
+        option_minimal_versions=option_minimal_versions,
+        # These flags are returned even on failure so callers can distinguish
+        # "no daemon" from "not in swarm" from "not a manager".
+        fail_results=dict(
+            can_talk_to_docker=False,
+            docker_swarm_active=False,
+            docker_swarm_manager=False,
+        ),
+    )
+    # Client creation succeeded, so the daemon is reachable; record the
+    # swarm membership/manager status for both success and failure output.
+    client.fail_results['can_talk_to_docker'] = True
+    client.fail_results['docker_swarm_active'] = client.check_if_swarm_node()
+    client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager()
+
+    try:
+        results = dict(
+            changed=False,
+        )
+
+        # DockerSwarmManager fills `results` in its constructor.
+        DockerSwarmManager(client, results)
+        results.update(client.fail_results)
+        client.module.exit_json(**results)
+    except DockerException as e:
+        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+    except RequestException as e:
+        client.fail(
+            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+            exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py b/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py
new file mode 100644
index 00000000..bb12fd38
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py
@@ -0,0 +1,2866 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017, Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service
+author:
+ - "Dario Zanzico (@dariko)"
+ - "Jason Witkowski (@jwitko)"
+ - "Hannes Ljungberg (@hannseman)"
+ - "Piotr Wojciechowski (@wojciechowskipiotr)"
+short_description: docker swarm service
+description:
+ - Manages docker services via a swarm manager node.
+  - This module does not support updating services in a stack.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_2_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ args:
+ description:
+ - List arguments to be passed to the container.
+ - Corresponds to the C(ARG) parameter of C(docker service create).
+ type: list
+ elements: str
+ command:
+ description:
+ - Command to execute when the container starts.
+ - A command may be either a string or a list or a list of strings.
+ - Corresponds to the C(COMMAND) parameter of C(docker service create).
+ type: raw
+ configs:
+ description:
+ - List of dictionaries describing the service configs.
+ - Corresponds to the C(--config) option of C(docker service create).
+ - Requires API version >= 1.30.
+ type: list
+ elements: dict
+ suboptions:
+ config_id:
+ description:
+ - Config's ID.
+ type: str
+ config_name:
+ description:
+ - Config's name as defined at its creation.
+ type: str
+ required: true
+ filename:
+ description:
+ - Name of the file containing the config. Defaults to the I(config_name) if not specified.
+ type: str
+ uid:
+ description:
+ - UID of the config file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the config file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ container_labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--container-label) option of C(docker service create).
+ type: dict
+ dns:
+ description:
+ - List of custom DNS servers.
+ - Corresponds to the C(--dns) option of C(docker service create).
+ type: list
+ elements: str
+ dns_search:
+ description:
+ - List of custom DNS search domains.
+ - Corresponds to the C(--dns-search) option of C(docker service create).
+ type: list
+ elements: str
+ dns_options:
+ description:
+ - List of custom DNS options.
+ - Corresponds to the C(--dns-option) option of C(docker service create).
+ type: list
+ elements: str
+ endpoint_mode:
+ description:
+ - Service endpoint mode.
+ - Corresponds to the C(--endpoint-mode) option of C(docker service create).
+ type: str
+ choices:
+ - vip
+ - dnsrr
+ env:
+ description:
+ - List or dictionary of the service environment variables.
+ - If passed a list each items need to be in the format of C(KEY=VALUE).
+ - If passed a dictionary values which might be parsed as numbers,
+ booleans or other types by the YAML parser must be quoted (for example C("true"))
+ in order to avoid data loss.
+ - Corresponds to the C(--env) option of C(docker service create).
+ type: raw
+ env_files:
+ description:
+ - List of paths to files, present on the target, containing environment variables C(FOO=BAR).
+ - The order of the list is significant in determining the value assigned to a
+ variable that shows up more than once.
+ - If variable also present in I(env), then I(env) value will override.
+ type: list
+ elements: path
+ force_update:
+ description:
+ - Force update even if no changes require it.
+ - Corresponds to the C(--force) option of C(docker service update).
+ type: bool
+ default: false
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ - Corresponds to the C(--group) option of C(docker service update).
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work.
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+ that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ type: str
+ retries:
+ description:
+        - Consecutive failures needed to report unhealthy. It accepts an integer value.
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ type: str
+ hostname:
+ description:
+ - Container hostname.
+ - Corresponds to the C(--hostname) option of C(docker service create).
+ type: str
+ hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's /etc/hosts file.
+ - Corresponds to the C(--host) option of C(docker service create).
+ type: dict
+ image:
+ description:
+ - Service image path and tag.
+ - Corresponds to the C(IMAGE) parameter of C(docker service create).
+ type: str
+ init:
+ description:
+ - Use an init inside each service container to forward signals and reap processes.
+ - Corresponds to the C(--init) option of C(docker service create).
+ - Requires API version >= 1.37.
+ type: bool
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--label) option of C(docker service create).
+ type: dict
+ limits:
+ description:
+ - Configures service resource limits.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU limit. C(0) equals no limit.
+ - Corresponds to the C(--limit-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no limit.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--limit-memory) option of C(docker service create).
+ type: str
+ type: dict
+ logging:
+ description:
+ - "Logging configuration for the service."
+ suboptions:
+ driver:
+ description:
+ - Configure the logging driver for a service.
+ - Corresponds to the C(--log-driver) option of C(docker service create).
+ type: str
+ options:
+ description:
+ - Options for service logging driver.
+ - Corresponds to the C(--log-opt) option of C(docker service create).
+ type: dict
+ type: dict
+ mode:
+ description:
+ - Service replication mode.
+ - Service will be removed and recreated when changed.
+ - Corresponds to the C(--mode) option of C(docker service create).
+ type: str
+ default: replicated
+ choices:
+ - replicated
+ - global
+ mounts:
+ description:
+ - List of dictionaries describing the service mounts.
+ - Corresponds to the C(--mount) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ source:
+ description:
+ - Mount source (for example a volume name or a host path).
+ - Must be specified if I(type) is not C(tmpfs).
+ type: str
+ target:
+ description:
+ - Container path.
+ type: str
+ required: true
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows. Also note that C(npipe) was added in Ansible 2.9.
+ type: str
+ default: bind
+ choices:
+ - bind
+ - volume
+ - tmpfs
+ - npipe
+ readonly:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ labels:
+ description:
+ - Volume labels to apply.
+ type: dict
+ propagation:
+ description:
+ - The propagation mode to use.
+ - Can only be used when I(type) is C(bind).
+ type: str
+ choices:
+ - shared
+ - slave
+ - private
+ - rshared
+ - rslave
+ - rprivate
+ no_copy:
+ description:
+ - Disable copying of data from a container when a volume is created.
+ - Can only be used when I(type) is C(volume).
+ type: bool
+ driver_config:
+ description:
+ - Volume driver configuration.
+ - Can only be used when I(type) is C(volume).
+ suboptions:
+ name:
+ description:
+ - Name of the volume-driver plugin to use for the volume.
+ type: str
+ options:
+ description:
+ - Options as key-value pairs to pass to the driver for this volume.
+ type: dict
+ type: dict
+ tmpfs_size:
+ description:
+ - "Size of the tmpfs mount in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Can only be used when I(type) is C(tmpfs).
+ type: str
+ tmpfs_mode:
+ description:
+ - File mode of the tmpfs in octal.
+ - Can only be used when I(type) is C(tmpfs).
+ type: int
+ name:
+ description:
+ - Service name.
+ - Corresponds to the C(--name) option of C(docker service create).
+ type: str
+ required: true
+ networks:
+ description:
+ - List of the service networks names or dictionaries.
+ - When passing dictionaries, valid sub-options are I(name), which is required, and
+ I(aliases) and I(options).
+ - Prior to API version 1.29, updating and removing networks is not supported.
+ If changes are made the service will then be removed and recreated.
+ - Corresponds to the C(--network) option of C(docker service create).
+ type: list
+ elements: raw
+ placement:
+ description:
+ - Configures service placement preferences and constraints.
+ suboptions:
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ type: list
+ elements: str
+ preferences:
+ description:
+ - List of the placement preferences as key value pairs.
+ - Corresponds to the C(--placement-pref) option of C(docker service create).
+ - Requires API version >= 1.27.
+ type: list
+ elements: dict
+ replicas_max_per_node:
+ description:
+ - Maximum number of tasks per node.
+ - Corresponds to the C(--replicas-max-per-node) option of C(docker service create).
+ - Requires API version >= 1.40.
+ type: int
+ version_added: 1.3.0
+ type: dict
+ publish:
+ description:
+ - List of dictionaries describing the service published ports.
+ - Corresponds to the C(--publish) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ published_port:
+ description:
+ - The port to make externally available.
+ type: int
+ required: false
+ target_port:
+ description:
+ - The port inside the container to expose.
+ type: int
+ required: true
+ protocol:
+ description:
+ - What protocol to use.
+ type: str
+ default: tcp
+ choices:
+ - tcp
+ - udp
+ mode:
+ description:
+ - What publish mode to use.
+ - Requires API version >= 1.32.
+ type: str
+ choices:
+ - ingress
+ - host
+ read_only:
+ description:
+ - Mount the containers root filesystem as read only.
+ - Corresponds to the C(--read-only) option of C(docker service create).
+ type: bool
+ replicas:
+ description:
+ - Number of containers instantiated in the service. Valid only if I(mode) is C(replicated).
+ - If set to C(-1), and service is not present, service replicas will be set to C(1).
+ - If set to C(-1), and service is present, service replicas will be unchanged.
+ - Corresponds to the C(--replicas) option of C(docker service create).
+ type: int
+ default: -1
+ reservations:
+ description:
+ - Configures service resource reservations.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ type: str
+ type: dict
+ resolve_image:
+ description:
+ - If the current image digest should be resolved from registry and updated if changed.
+ - Requires API version >= 1.30.
+ type: bool
+ default: false
+ restart_config:
+ description:
+ - Configures if and how to restart containers when they exit.
+ suboptions:
+ condition:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ type: str
+ max_attempts:
+ description:
+ - Maximum number of service restarts.
+ - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ type: int
+ window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ type: str
+ type: dict
+ rollback_config:
+ description:
+ - Configures how the service should be rolled back in case of a failing update.
+ suboptions:
+ parallelism:
+ description:
+ - The number of containers to rollback at a time. If set to 0, all containers rollback simultaneously.
+ - Corresponds to the C(--rollback-parallelism) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: int
+ delay:
+ description:
+ - Delay between task rollbacks.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-delay) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of rollback failure.
+ - Corresponds to the C(--rollback-failure-action) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ choices:
+ - continue
+ - pause
+ monitor:
+ description:
+ - Duration after each task rollback to monitor for failure.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-monitor) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during a rollback.
+ - Corresponds to the C(--rollback-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations during rollbacks.
+ - Corresponds to the C(--rollback-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ secrets:
+ description:
+ - List of dictionaries describing the service secrets.
+ - Corresponds to the C(--secret) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ secret_id:
+ description:
+ - Secret's ID.
+ type: str
+ secret_name:
+ description:
+ - Secret's name as defined at its creation.
+ type: str
+ required: true
+ filename:
+ description:
+ - Name of the file containing the secret. Defaults to the I(secret_name) if not specified.
+ - Corresponds to the C(target) key of C(docker service create --secret).
+ type: str
+ uid:
+ description:
+ - UID of the secret file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the secret file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ state:
+ description:
+ - C(absent) - A service matching the specified name will be removed and have its tasks stopped.
+ - C(present) - Asserts the existence of a service matching the name and provided configuration parameters.
+ Unspecified configuration parameters will be set to docker defaults.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ stop_grace_period:
+ description:
+ - Time to wait before force killing a container.
+ - "Accepts a duration as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--stop-grace-period) option of C(docker service create).
+ type: str
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ - Corresponds to the C(--stop-signal) option of C(docker service create).
+ type: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - Corresponds to the C(--tty) option of C(docker service create).
+ type: bool
+ update_config:
+ description:
+ - Configures how the service should be updated. Useful for configuring rolling updates.
+ suboptions:
+ parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ type: int
+ delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of I(rollback) requires API version >= 1.29.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ type: float
+ order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ user:
+ description:
+ - Sets the username or UID used for the specified command.
+ - Before Ansible 2.8, the default value for this option was C(root).
+ - The default has been removed so that the user defined in the image is used if no user is specified here.
+ - Corresponds to the C(--user) option of C(docker service create).
+ type: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ - Corresponds to the C(--workdir) option of C(docker service create).
+ type: str
+ cap_add:
+ description:
+ - List of capabilities to add to the container.
+ - Requires API version >= 1.41.
+ type: list
+ elements: str
+ version_added: 2.2.0
+ cap_drop:
+ description:
+ - List of capabilities to drop from the container.
+ - Requires API version >= 1.41.
+ type: list
+ elements: str
+ version_added: 2.2.0
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.2"
+ - "Docker API >= 1.25"
+notes:
+ - "Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0.
+ When using older versions use C(force_update: true) to trigger the swarm to resolve a new image."
+'''
+
+RETURN = '''
+swarm_service:
+ returned: always
+ type: dict
+ description:
+ - Dictionary of variables representing the current state of the service.
+ Matches the module parameters format.
+ - Note that facts are not part of registered vars but accessible directly.
+ - Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service),
+ while the module actually returned a variable called C(ansible_docker_service). The variable
+ was renamed to C(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0.
+ In Ansible 2.7.x, the old name C(ansible_docker_service) can still be used.
+ sample: '{
+ "args": [
+ "3600"
+ ],
+ "cap_add": null,
+ "cap_drop": [
+ "ALL"
+ ],
+ "command": [
+ "sleep"
+ ],
+ "configs": null,
+ "constraints": [
+ "node.role == manager",
+ "engine.labels.operatingsystem == ubuntu 14.04"
+ ],
+ "container_labels": null,
+ "dns": null,
+ "dns_options": null,
+ "dns_search": null,
+ "endpoint_mode": null,
+ "env": [
+ "ENVVAR1=envvar1",
+ "ENVVAR2=envvar2"
+ ],
+ "force_update": null,
+ "groups": null,
+ "healthcheck": {
+ "interval": 90000000000,
+ "retries": 3,
+ "start_period": 30000000000,
+ "test": [
+ "CMD",
+ "curl",
+ "--fail",
+ "http://nginx.host.com"
+ ],
+ "timeout": 10000000000
+ },
+ "healthcheck_disabled": false,
+ "hostname": null,
+ "hosts": null,
+ "image": "alpine:latest@sha256:b3dbf31b77fd99d9c08f780ce6f5282aba076d70a513a8be859d8d3a4d0c92b8",
+ "labels": {
+ "com.example.department": "Finance",
+ "com.example.description": "Accounting webapp"
+ },
+ "limit_cpu": 0.5,
+ "limit_memory": 52428800,
+ "log_driver": "fluentd",
+ "log_driver_options": {
+ "fluentd-address": "127.0.0.1:24224",
+ "fluentd-async-connect": "true",
+ "tag": "myservice"
+ },
+ "mode": "replicated",
+ "mounts": [
+ {
+ "readonly": false,
+ "source": "/tmp/",
+ "target": "/remote_tmp/",
+ "type": "bind",
+ "labels": null,
+ "propagation": null,
+ "no_copy": null,
+ "driver_config": null,
+ "tmpfs_size": null,
+ "tmpfs_mode": null
+ }
+ ],
+ "networks": null,
+ "placement_preferences": [
+ {
+ "spread": "node.labels.mylabel"
+ }
+ ],
+ "publish": null,
+ "read_only": null,
+ "replicas": 1,
+ "replicas_max_per_node": 1,
+ "reserve_cpu": 0.25,
+ "reserve_memory": 20971520,
+ "restart_policy": "on-failure",
+ "restart_policy_attempts": 3,
+ "restart_policy_delay": 5000000000,
+ "restart_policy_window": 120000000000,
+ "secrets": null,
+ "stop_grace_period": null,
+ "stop_signal": null,
+ "tty": null,
+ "update_delay": 10000000000,
+ "update_failure_action": null,
+ "update_max_failure_ratio": null,
+ "update_monitor": null,
+ "update_order": "stop-first",
+ "update_parallelism": 2,
+ "user": null,
+ "working_dir": null
+ }'
+changes:
+ returned: always
+ description:
+ - List of changed service attributes if a service has been altered, [] otherwise.
+ type: list
+ elements: str
+ sample: ['container_labels', 'replicas']
+rebuilt:
+ returned: always
+ description:
+ - True if the service has been recreated (removed and created).
+ type: bool
+ sample: True
+'''
+
+EXAMPLES = '''
+- name: Set command and arguments
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ command: sleep
+ args:
+ - "3600"
+
+- name: Set a bind mount
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ mounts:
+ - source: /tmp/
+ target: /remote_tmp/
+ type: bind
+
+- name: Set service labels
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ labels:
+ com.example.description: "Accounting webapp"
+ com.example.department: "Finance"
+
+- name: Set environment variables
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ env:
+ ENVVAR1: envvar1
+ ENVVAR2: envvar2
+ env_files:
+ - envs/common.env
+ - envs/apps/web.env
+
+- name: Set fluentd logging
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ logging:
+ driver: fluentd
+ options:
+ fluentd-address: "127.0.0.1:24224"
+ fluentd-async-connect: "true"
+ tag: myservice
+
+- name: Set restart policies
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ restart_config:
+ condition: on-failure
+ delay: 5s
+ max_attempts: 3
+ window: 120s
+
+- name: Set update config
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set rollback config
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ failure_action: rollback
+ rollback_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set placement preferences
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ placement:
+ preferences:
+ - spread: node.labels.mylabel
+ constraints:
+ - node.role == manager
+ - engine.labels.operatingsystem == ubuntu 14.04
+ replicas_max_per_node: 2
+
+- name: Set configs
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ configs:
+ - config_name: myconfig_name
+ filename: "/tmp/config.txt"
+
+- name: Set networks
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - mynetwork
+
+- name: Set networks as a dictionary
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - name: "mynetwork"
+ aliases:
+ - "mynetwork_alias"
+ options:
+ foo: bar
+
+- name: Set secrets
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ secrets:
+ - secret_name: mysecret_name
+ filename: "/run/secrets/secret.txt"
+
+- name: Start service with healthcheck
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: nginx:1.13
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or timeouts, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Configure service resources
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ reservations:
+ cpus: 0.25
+ memory: 20M
+ limits:
+ cpus: 0.50
+ memory: 50M
+
+- name: Remove service
+ community.docker.docker_swarm_service:
+ name: myservice
+ state: absent
+'''
+
+import shlex
+import time
+import traceback
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DifferenceTracker,
+ DockerBaseClass,
+ convert_duration_to_nanosecond,
+ parse_healthcheck,
+ clean_dict_booleans_for_docker_api,
+)
+
+from ansible.module_utils.basic import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common.text.converters import to_text, to_native
+
+try:
+ from docker import types
+ from docker.utils import (
+ parse_repository_tag,
+ parse_env_file,
+ format_environment,
+ )
+ from docker.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+ )
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_docker_environment(env, env_files):
+    """
+    Will return a list of "KEY=VALUE" items. Supplied env variable can
+    be either a list or a dictionary.
+
+    If environment files are combined with explicit environment variables,
+    the explicit environment variables take precedence.
+    """
+    env_dict = {}
+    if env_files:
+        # Files are loaded first so that explicit env entries below overwrite them.
+        for env_file in env_files:
+            parsed_env_file = parse_env_file(env_file)
+            for name, value in parsed_env_file.items():
+                env_dict[name] = str(value)
+    if env is not None and isinstance(env, string_types):
+        # A comma-separated string is treated as a list of KEY=VALUE items.
+        env = env.split(',')
+    if env is not None and isinstance(env, dict):
+        for name, value in env.items():
+            if not isinstance(value, string_types):
+                raise ValueError(
+                    'Non-string value found for env option. '
+                    'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s' % name
+                )
+            env_dict[name] = str(value)
+    elif env is not None and isinstance(env, list):
+        for item in env:
+            try:
+                name, value = item.split('=', 1)
+            except ValueError:
+                raise ValueError('Invalid environment variable found in list, needs to be in format KEY=VALUE.')
+            env_dict[name] = value
+    elif env is not None:
+        raise ValueError(
+            'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env))
+        )
+    env_list = format_environment(env_dict)
+    if not env_list:
+        # Distinguish "explicitly set to empty" ([]) from "not specified" (None).
+        if env is not None or env_files is not None:
+            return []
+        else:
+            return None
+    # Sorted for stable comparisons against the service's current environment.
+    return sorted(env_list)
+
+
+def get_docker_networks(networks, network_ids):
+    """
+    Validate a list of network names or a list of network dictionaries.
+    Network names will be resolved to ids by using the network_ids mapping.
+    """
+    if networks is None:
+        return None
+    parsed_networks = []
+    for network in networks:
+        if isinstance(network, string_types):
+            # A plain string is shorthand for a network with just a name.
+            parsed_network = {'name': network}
+        elif isinstance(network, dict):
+            # NOTE(review): pop() mutates the caller-supplied dict — confirm callers pass a copy.
+            if 'name' not in network:
+                raise TypeError(
+                    '"name" is required when networks are passed as dictionaries.'
+                )
+            name = network.pop('name')
+            parsed_network = {'name': name}
+            aliases = network.pop('aliases', None)
+            if aliases is not None:
+                if not isinstance(aliases, list):
+                    raise TypeError('"aliases" network option is only allowed as a list')
+                if not all(
+                    isinstance(alias, string_types) for alias in aliases
+                ):
+                    raise TypeError('Only strings are allowed as network aliases.')
+                parsed_network['aliases'] = aliases
+            options = network.pop('options', None)
+            if options is not None:
+                if not isinstance(options, dict):
+                    raise TypeError('Only dict is allowed as network options.')
+                parsed_network['options'] = clean_dict_booleans_for_docker_api(options)
+            # Check if any invalid keys left
+            if network:
+                invalid_keys = ', '.join(network.keys())
+                raise TypeError(
+                    '%s are not valid keys for the networks option' % invalid_keys
+                )
+
+        else:
+            raise TypeError(
+                'Only a list of strings or dictionaries are allowed to be passed as networks.'
+            )
+        # Replace the human-readable name by the resolved network id.
+        network_name = parsed_network.pop('name')
+        try:
+            parsed_network['id'] = network_ids[network_name]
+        except KeyError as e:
+            raise ValueError('Could not find a network named: %s.' % e)
+        parsed_networks.append(parsed_network)
+    return parsed_networks or []
+
+
+def get_nanoseconds_from_raw_option(name, value):
+    """
+    Normalize a duration option to nanoseconds.
+
+    Accepts None (passed through), an int (assumed to already be in
+    nanoseconds), or a string that is either a plain integer or a
+    duration such as "1m30s" (converted via convert_duration_to_nanosecond).
+    The name argument is only used in the error message.
+    """
+    if value is None:
+        return None
+    elif isinstance(value, int):
+        return value
+    elif isinstance(value, string_types):
+        try:
+            return int(value)
+        except ValueError:
+            return convert_duration_to_nanosecond(value)
+    else:
+        raise ValueError(
+            'Invalid type for %s %s (%s). Only string or int allowed.'
+            % (name, value, type(value))
+        )
+
+
+def get_value(key, values, default=None):
+    """Return values[key], falling back to default when the key is missing or its value is None."""
+    value = values.get(key)
+    return value if value is not None else default
+
+
+def has_dict_changed(new_dict, old_dict):
+    """
+    Check if new_dict has differences compared to old_dict while
+    ignoring keys in old_dict which are None in new_dict.
+    """
+    if new_dict is None:
+        # None means "option not specified": never a change.
+        return False
+    if not new_dict and old_dict:
+        return True
+    if not old_dict and new_dict:
+        return True
+    # Only keys explicitly set (non-None) in new_dict take part in the comparison.
+    defined_options = dict(
+        (option, value) for option, value in new_dict.items()
+        if value is not None
+    )
+    for option, value in defined_options.items():
+        old_value = old_dict.get(option)
+        if not value and not old_value:
+            # Both falsy (e.g. '' vs None, 0 vs None): treated as equal.
+            continue
+        if value != old_value:
+            return True
+    return False
+
+
+def has_list_changed(new_list, old_list, sort_lists=True, sort_key=None):
+    """
+    Check two lists have differences. Sort lists by default.
+    """
+
+    def sort_list(unsorted_list):
+        """
+        Sort a given list.
+        The list may contain dictionaries, so use the sort key to handle them.
+        """
+
+        if unsorted_list and isinstance(unsorted_list[0], dict):
+            if not sort_key:
+                raise Exception(
+                    'A sort key was not specified when sorting list'
+                )
+            else:
+                return sorted(unsorted_list, key=lambda k: k[sort_key])
+
+        # Either the list is empty or does not contain dictionaries
+        try:
+            return sorted(unsorted_list)
+        except TypeError:
+            # Unorderable element types: fall back to the original order.
+            return unsorted_list
+
+    if new_list is None:
+        # None means the option was not specified: never report a change.
+        return False
+    old_list = old_list or []
+    if len(new_list) != len(old_list):
+        return True
+
+    if sort_lists:
+        zip_data = zip(sort_list(new_list), sort_list(old_list))
+    else:
+        zip_data = zip(new_list, old_list)
+    for new_item, old_item in zip_data:
+        is_same_type = type(new_item) == type(old_item)
+        if not is_same_type:
+            if isinstance(new_item, string_types) and isinstance(old_item, string_types):
+                # Even though the types are different between these items,
+                # they are both strings. Try matching on the same string type.
+                try:
+                    new_item_type = type(new_item)
+                    old_item_casted = new_item_type(old_item)
+                    if new_item != old_item_casted:
+                        return True
+                    else:
+                        continue
+                except UnicodeEncodeError:
+                    # Fallback to assuming the strings are different
+                    return True
+            else:
+                return True
+        if isinstance(new_item, dict):
+            # Dict elements are compared with None-tolerant semantics.
+            if has_dict_changed(new_item, old_item):
+                return True
+        elif new_item != old_item:
+            return True
+
+    return False
+
+
+def have_networks_changed(new_networks, old_networks):
+    """Special case list checking for networks to sort aliases"""
+
+    if new_networks is None:
+        # None means the option was not specified: never report a change.
+        return False
+    old_networks = old_networks or []
+    if len(new_networks) != len(old_networks):
+        return True
+
+    # Compare pairwise after ordering both sides by network id.
+    zip_data = zip(
+        sorted(new_networks, key=lambda k: k['id']),
+        sorted(old_networks, key=lambda k: k['id'])
+    )
+
+    for new_item, old_item in zip_data:
+        # Copy so sorting the alias lists does not mutate the caller's data.
+        new_item = dict(new_item)
+        old_item = dict(old_item)
+        # Sort the aliases
+        if 'aliases' in new_item:
+            new_item['aliases'] = sorted(new_item['aliases'] or [])
+        if 'aliases' in old_item:
+            old_item['aliases'] = sorted(old_item['aliases'] or [])
+
+        if has_dict_changed(new_item, old_item):
+            return True
+
+    return False
+
+
+class DockerService(DockerBaseClass):
+    def __init__(self, docker_api_version, docker_py_version):
+        # Holds the full (desired or current) state of one swarm service.
+        # Most options default to None, meaning "not specified"; exceptions
+        # are noted inline.
+        super(DockerService, self).__init__()
+        self.image = ""
+        self.command = None
+        self.args = None
+        self.endpoint_mode = None
+        self.dns = None
+        self.healthcheck = None
+        self.healthcheck_disabled = None
+        self.hostname = None
+        self.hosts = None
+        self.tty = None
+        self.dns_search = None
+        self.dns_options = None
+        self.env = None
+        self.force_update = None
+        self.groups = None
+        self.log_driver = None
+        self.log_driver_options = None
+        self.labels = None
+        self.container_labels = None
+        self.limit_cpu = None
+        self.limit_memory = None
+        self.reserve_cpu = None
+        self.reserve_memory = None
+        self.mode = "replicated"  # default replication mode per module docs
+        self.user = None
+        self.mounts = None
+        self.configs = None
+        self.secrets = None
+        self.constraints = None
+        self.replicas_max_per_node = None
+        self.networks = None
+        self.stop_grace_period = None
+        self.stop_signal = None
+        self.publish = None
+        self.placement_preferences = None
+        self.replicas = -1  # -1: keep existing count, or 1 when creating (per module docs)
+        self.service_id = False
+        self.service_version = False
+        self.read_only = None
+        self.restart_policy = None
+        self.restart_policy_attempts = None
+        self.restart_policy_delay = None
+        self.restart_policy_window = None
+        self.rollback_config = None
+        self.update_delay = None
+        self.update_parallelism = None
+        self.update_failure_action = None
+        self.update_monitor = None
+        self.update_max_failure_ratio = None
+        self.update_order = None
+        self.working_dir = None
+        self.init = None
+        self.cap_add = None
+        self.cap_drop = None
+
+        # Versions of the Docker daemon API and the Docker SDK for Python,
+        # used for feature gating (see can_update_networks & co.).
+        self.docker_api_version = docker_api_version
+        self.docker_py_version = docker_py_version
+
+    def get_facts(self):
+        """Return a dict of the service state matching the module's
+        documented ``swarm_service`` return format."""
+        return {
+            'image': self.image,
+            'mounts': self.mounts,
+            'configs': self.configs,
+            'networks': self.networks,
+            'command': self.command,
+            'args': self.args,
+            'tty': self.tty,
+            'dns': self.dns,
+            'dns_search': self.dns_search,
+            'dns_options': self.dns_options,
+            'healthcheck': self.healthcheck,
+            'healthcheck_disabled': self.healthcheck_disabled,
+            'hostname': self.hostname,
+            'hosts': self.hosts,
+            'env': self.env,
+            'force_update': self.force_update,
+            'groups': self.groups,
+            'log_driver': self.log_driver,
+            'log_driver_options': self.log_driver_options,
+            'publish': self.publish,
+            'constraints': self.constraints,
+            'replicas_max_per_node': self.replicas_max_per_node,
+            'placement_preferences': self.placement_preferences,
+            'labels': self.labels,
+            'container_labels': self.container_labels,
+            'mode': self.mode,
+            'replicas': self.replicas,
+            'endpoint_mode': self.endpoint_mode,
+            'restart_policy': self.restart_policy,
+            'secrets': self.secrets,
+            'stop_grace_period': self.stop_grace_period,
+            'stop_signal': self.stop_signal,
+            'limit_cpu': self.limit_cpu,
+            'limit_memory': self.limit_memory,
+            'read_only': self.read_only,
+            'reserve_cpu': self.reserve_cpu,
+            'reserve_memory': self.reserve_memory,
+            'restart_policy_delay': self.restart_policy_delay,
+            'restart_policy_attempts': self.restart_policy_attempts,
+            'restart_policy_window': self.restart_policy_window,
+            'rollback_config': self.rollback_config,
+            'update_delay': self.update_delay,
+            'update_parallelism': self.update_parallelism,
+            'update_failure_action': self.update_failure_action,
+            'update_monitor': self.update_monitor,
+            'update_max_failure_ratio': self.update_max_failure_ratio,
+            'update_order': self.update_order,
+            'user': self.user,
+            'working_dir': self.working_dir,
+            'init': self.init,
+            'cap_add': self.cap_add,
+            'cap_drop': self.cap_drop,
+        }
+
+    @property
+    def can_update_networks(self):
+        """True when networks can be added/removed in place (API >= 1.29, docker-py >= 2.7)."""
+        # Before Docker API 1.29 adding/removing networks was not supported
+        return (
+            self.docker_api_version >= LooseVersion('1.29') and
+            self.docker_py_version >= LooseVersion('2.7')
+        )
+
+    @property
+    def can_use_task_template_networks(self):
+        """True when networks can be attached via TaskTemplate (docker-py >= 2.7)."""
+        # In Docker API 1.25 attaching networks to TaskTemplate is preferred over Spec
+        return self.docker_py_version >= LooseVersion('2.7')
+
+    @staticmethod
+    def get_restart_config_from_ansible_params(params):
+        """Flatten the nested ``restart_config`` module option into
+        ``restart_policy_*`` keys, converting duration strings
+        (delay, window) to nanoseconds."""
+        restart_config = params['restart_config'] or {}
+        condition = get_value(
+            'condition',
+            restart_config,
+        )
+        delay = get_value(
+            'delay',
+            restart_config,
+        )
+        delay = get_nanoseconds_from_raw_option(
+            'restart_policy_delay',
+            delay
+        )
+        max_attempts = get_value(
+            'max_attempts',
+            restart_config,
+        )
+        window = get_value(
+            'window',
+            restart_config,
+        )
+        window = get_nanoseconds_from_raw_option(
+            'restart_policy_window',
+            window
+        )
+        return {
+            'restart_policy': condition,
+            'restart_policy_delay': delay,
+            'restart_policy_attempts': max_attempts,
+            'restart_policy_window': window
+        }
+
+    @staticmethod
+    def get_update_config_from_ansible_params(params):
+        """Flatten the nested ``update_config`` module option into
+        ``update_*`` keys, converting duration strings (delay, monitor)
+        to nanoseconds."""
+        update_config = params['update_config'] or {}
+        parallelism = get_value(
+            'parallelism',
+            update_config,
+        )
+        delay = get_value(
+            'delay',
+            update_config,
+        )
+        delay = get_nanoseconds_from_raw_option(
+            'update_delay',
+            delay
+        )
+        failure_action = get_value(
+            'failure_action',
+            update_config,
+        )
+        monitor = get_value(
+            'monitor',
+            update_config,
+        )
+        monitor = get_nanoseconds_from_raw_option(
+            'update_monitor',
+            monitor
+        )
+        max_failure_ratio = get_value(
+            'max_failure_ratio',
+            update_config,
+        )
+        order = get_value(
+            'order',
+            update_config,
+        )
+        return {
+            'update_parallelism': parallelism,
+            'update_delay': delay,
+            'update_failure_action': failure_action,
+            'update_monitor': monitor,
+            'update_max_failure_ratio': max_failure_ratio,
+            'update_order': order
+        }
+
+    @staticmethod
+    def get_rollback_config_from_ansible_params(params):
+        """Normalize the ``rollback_config`` module option, converting
+        duration strings (delay, monitor) to nanoseconds.
+
+        Unlike the other option getters, returns None (instead of a dict
+        of Nones) when the option was not specified at all.
+        """
+        if params['rollback_config'] is None:
+            return None
+        rollback_config = params['rollback_config'] or {}
+        delay = get_nanoseconds_from_raw_option(
+            'rollback_config.delay',
+            rollback_config.get('delay')
+        )
+        monitor = get_nanoseconds_from_raw_option(
+            'rollback_config.monitor',
+            rollback_config.get('monitor')
+        )
+        return {
+            'parallelism': rollback_config.get('parallelism'),
+            'delay': delay,
+            'failure_action': rollback_config.get('failure_action'),
+            'monitor': monitor,
+            'max_failure_ratio': rollback_config.get('max_failure_ratio'),
+            'order': rollback_config.get('order'),
+
+        }
+
+    @staticmethod
+    def get_logging_from_ansible_params(params):
+        """Flatten the nested ``logging`` module option into
+        ``log_driver``/``log_driver_options`` keys."""
+        logging_config = params['logging'] or {}
+        driver = get_value(
+            'driver',
+            logging_config,
+        )
+        options = get_value(
+            'options',
+            logging_config,
+        )
+        return {
+            'log_driver': driver,
+            'log_driver_options': options,
+        }
+
+ @staticmethod
+ def get_limits_from_ansible_params(params):
+ # Extract resource limits. 'memory' accepts human-readable sizes
+ # (e.g. '512M') and is normalized to bytes; a malformed value is
+ # re-raised with a message naming the option.
+ limits = params['limits'] or {}
+ cpus = get_value(
+ 'cpus',
+ limits,
+ )
+ memory = get_value(
+ 'memory',
+ limits,
+ )
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert limit_memory to bytes: %s' % exc)
+ return {
+ 'limit_cpu': cpus,
+ 'limit_memory': memory,
+ }
+
+ @staticmethod
+ def get_reservations_from_ansible_params(params):
+ # Extract resource reservations; mirrors get_limits_from_ansible_params
+ # (human-readable memory converted to bytes, errors re-raised with
+ # the option name).
+ reservations = params['reservations'] or {}
+ cpus = get_value(
+ 'cpus',
+ reservations,
+ )
+ memory = get_value(
+ 'memory',
+ reservations,
+ )
+
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert reserve_memory to bytes: %s' % exc)
+ return {
+ 'reserve_cpu': cpus,
+ 'reserve_memory': memory,
+ }
+
+ @staticmethod
+ def get_placement_from_ansible_params(params):
+ # Extract scheduling placement settings (constraints, preferences,
+ # max replicas per node) from the 'placement' option.
+ placement = params['placement'] or {}
+ constraints = get_value(
+ 'constraints',
+ placement
+ )
+
+ # NOTE(review): preferences uses plain .get() while the siblings go
+ # through get_value — presumably equivalent for this key; confirm
+ # get_value adds nothing beyond dict lookup here.
+ preferences = placement.get('preferences')
+ replicas_max_per_node = get_value(
+ 'replicas_max_per_node',
+ placement
+ )
+
+ return {
+ 'constraints': constraints,
+ 'placement_preferences': preferences,
+ 'replicas_max_per_node': replicas_max_per_node,
+ }
+
+ @classmethod
+ def from_ansible_params(
+ cls,
+ ap,
+ old_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ docker_api_version,
+ docker_py_version,
+ ):
+ # Factory: build a DockerService (the *desired* state) from the
+ # Ansible module parameters 'ap'.
+ # old_service: existing DockerService or None; only used to carry
+ # over the replica count when 'replicas' is -1 (i.e. unspecified).
+ # image_digest: resolved image reference used verbatim as s.image.
+ # secret_ids / config_ids: name -> id mappings used to resolve
+ # references when the id was not given explicitly — presumably a
+ # missing name raises KeyError; TODO confirm callers pre-validate.
+ # network_ids: passed through to get_docker_networks for resolution.
+ s = DockerService(docker_api_version, docker_py_version)
+ s.image = image_digest
+ s.args = ap['args']
+ s.endpoint_mode = ap['endpoint_mode']
+ s.dns = ap['dns']
+ s.dns_search = ap['dns_search']
+ s.dns_options = ap['dns_options']
+ s.healthcheck, s.healthcheck_disabled = parse_healthcheck(ap['healthcheck'])
+ s.hostname = ap['hostname']
+ s.hosts = ap['hosts']
+ s.tty = ap['tty']
+ s.labels = ap['labels']
+ s.container_labels = ap['container_labels']
+ s.mode = ap['mode']
+ s.stop_signal = ap['stop_signal']
+ s.user = ap['user']
+ s.working_dir = ap['working_dir']
+ s.read_only = ap['read_only']
+ s.init = ap['init']
+ s.cap_add = ap['cap_add']
+ s.cap_drop = ap['cap_drop']
+
+ s.networks = get_docker_networks(ap['networks'], network_ids)
+
+ # 'command' may be a string (shlex-split into argv) or a list of
+ # strings (validated element-by-element); anything else is rejected.
+ s.command = ap['command']
+ if isinstance(s.command, string_types):
+ s.command = shlex.split(s.command)
+ elif isinstance(s.command, list):
+ invalid_items = [
+ (index, item)
+ for index, item in enumerate(s.command)
+ if not isinstance(item, string_types)
+ ]
+ if invalid_items:
+ errors = ', '.join(
+ [
+ '%s (%s) at index %s' % (item, type(item), index)
+ for index, item in invalid_items
+ ]
+ )
+ raise Exception(
+ 'All items in a command list need to be strings. '
+ 'Check quoting. Invalid items: %s.'
+ % errors
+ )
+ # NOTE(review): re-assignment is a no-op — s.command already
+ # holds ap['command'] at this point.
+ s.command = ap['command']
+ elif s.command is not None:
+ raise ValueError(
+ 'Invalid type for command %s (%s). '
+ 'Only string or list allowed. Check quoting.'
+ % (s.command, type(s.command))
+ )
+
+ s.env = get_docker_environment(ap['env'], ap['env_files'])
+ s.rollback_config = cls.get_rollback_config_from_ansible_params(ap)
+
+ # The get_*_from_ansible_params helpers return dicts keyed by
+ # DockerService attribute names, applied here via setattr.
+ update_config = cls.get_update_config_from_ansible_params(ap)
+ for key, value in update_config.items():
+ setattr(s, key, value)
+
+ restart_config = cls.get_restart_config_from_ansible_params(ap)
+ for key, value in restart_config.items():
+ setattr(s, key, value)
+
+ logging_config = cls.get_logging_from_ansible_params(ap)
+ for key, value in logging_config.items():
+ setattr(s, key, value)
+
+ limits = cls.get_limits_from_ansible_params(ap)
+ for key, value in limits.items():
+ setattr(s, key, value)
+
+ reservations = cls.get_reservations_from_ansible_params(ap)
+ for key, value in reservations.items():
+ setattr(s, key, value)
+
+ placement = cls.get_placement_from_ansible_params(ap)
+ for key, value in placement.items():
+ setattr(s, key, value)
+
+ if ap['stop_grace_period'] is not None:
+ s.stop_grace_period = convert_duration_to_nanosecond(ap['stop_grace_period'])
+
+ if ap['force_update']:
+ # Current timestamp with the decimal point stripped, so each run
+ # yields a new integer and forces a service update.
+ s.force_update = int(str(time.time()).replace('.', ''))
+
+ if ap['groups'] is not None:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ s.groups = [str(g) for g in ap['groups']]
+
+ # replicas == -1 means "not specified": keep the old service's count
+ # if one exists, otherwise default to 1.
+ if ap['replicas'] == -1:
+ if old_service:
+ s.replicas = old_service.replicas
+ else:
+ s.replicas = 1
+ else:
+ s.replicas = ap['replicas']
+
+ if ap['publish'] is not None:
+ s.publish = []
+ for param_p in ap['publish']:
+ service_p = {}
+ service_p['protocol'] = param_p['protocol']
+ service_p['mode'] = param_p['mode']
+ service_p['published_port'] = param_p['published_port']
+ service_p['target_port'] = param_p['target_port']
+ s.publish.append(service_p)
+
+ if ap['mounts'] is not None:
+ s.mounts = []
+ for param_m in ap['mounts']:
+ service_m = {}
+ service_m['readonly'] = param_m['readonly']
+ service_m['type'] = param_m['type']
+ # Only tmpfs mounts may omit a source; everything else must
+ # name a host path or volume.
+ if param_m['source'] is None and param_m['type'] != 'tmpfs':
+ raise ValueError('Source must be specified for mounts which are not of type tmpfs')
+ service_m['source'] = param_m['source'] or ''
+ service_m['target'] = param_m['target']
+ service_m['labels'] = param_m['labels']
+ service_m['no_copy'] = param_m['no_copy']
+ service_m['propagation'] = param_m['propagation']
+ service_m['driver_config'] = param_m['driver_config']
+ service_m['tmpfs_mode'] = param_m['tmpfs_mode']
+ tmpfs_size = param_m['tmpfs_size']
+ if tmpfs_size is not None:
+ try:
+ tmpfs_size = human_to_bytes(tmpfs_size)
+ except ValueError as exc:
+ raise ValueError(
+ 'Failed to convert tmpfs_size to bytes: %s' % exc
+ )
+
+ service_m['tmpfs_size'] = tmpfs_size
+ s.mounts.append(service_m)
+
+ if ap['configs'] is not None:
+ s.configs = []
+ for param_m in ap['configs']:
+ service_c = {}
+ config_name = param_m['config_name']
+ # Fall back to name->id lookup when no explicit id was given;
+ # filename defaults to the config name.
+ service_c['config_id'] = param_m['config_id'] or config_ids[config_name]
+ service_c['config_name'] = config_name
+ service_c['filename'] = param_m['filename'] or config_name
+ service_c['uid'] = param_m['uid']
+ service_c['gid'] = param_m['gid']
+ service_c['mode'] = param_m['mode']
+ s.configs.append(service_c)
+
+ if ap['secrets'] is not None:
+ s.secrets = []
+ for param_m in ap['secrets']:
+ service_s = {}
+ secret_name = param_m['secret_name']
+ service_s['secret_id'] = param_m['secret_id'] or secret_ids[secret_name]
+ service_s['secret_name'] = secret_name
+ service_s['filename'] = param_m['filename'] or secret_name
+ service_s['uid'] = param_m['uid']
+ service_s['gid'] = param_m['gid']
+ service_s['mode'] = param_m['mode']
+ s.secrets.append(service_s)
+
+ return s
+
+ def compare(self, os):
+ # Diff this (desired) service against 'os' (the old/active service).
+ # Returns a 4-tuple:
+ # (changed, differences, needs_rebuild, force_update)
+ # where 'changed' is True when any difference exists or force_update
+ # is set, 'differences' is a DifferenceTracker of per-option diffs,
+ # 'needs_rebuild' means the service must be removed and recreated,
+ # and 'force_update' reflects self.force_update.
+ # Convention throughout: options left as None in the desired state
+ # are treated as "not managed" and skipped.
+ differences = DifferenceTracker()
+ needs_rebuild = False
+ force_update = False
+ if self.endpoint_mode is not None and self.endpoint_mode != os.endpoint_mode:
+ differences.add('endpoint_mode', parameter=self.endpoint_mode, active=os.endpoint_mode)
+ if has_list_changed(self.env, os.env):
+ differences.add('env', parameter=self.env, active=os.env)
+ if self.log_driver is not None and self.log_driver != os.log_driver:
+ differences.add('log_driver', parameter=self.log_driver, active=os.log_driver)
+ if self.log_driver_options is not None and self.log_driver_options != (os.log_driver_options or {}):
+ differences.add('log_opt', parameter=self.log_driver_options, active=os.log_driver_options)
+ if self.mode != os.mode:
+ # Service mode (replicated/global) cannot be updated in place.
+ needs_rebuild = True
+ differences.add('mode', parameter=self.mode, active=os.mode)
+ if has_list_changed(self.mounts, os.mounts, sort_key='target'):
+ differences.add('mounts', parameter=self.mounts, active=os.mounts)
+ if has_list_changed(self.configs, os.configs, sort_key='config_name'):
+ differences.add('configs', parameter=self.configs, active=os.configs)
+ if has_list_changed(self.secrets, os.secrets, sort_key='secret_name'):
+ differences.add('secrets', parameter=self.secrets, active=os.secrets)
+ if have_networks_changed(self.networks, os.networks):
+ differences.add('networks', parameter=self.networks, active=os.networks)
+ # NOTE(review): plain assignment can overwrite needs_rebuild=True
+ # set by a mode change above when networks *can* be updated —
+ # confirm whether 'needs_rebuild = needs_rebuild or not
+ # self.can_update_networks' was intended.
+ needs_rebuild = not self.can_update_networks
+ if self.replicas != os.replicas:
+ differences.add('replicas', parameter=self.replicas, active=os.replicas)
+ # command/args/dns*/placement_preferences are order-sensitive, hence
+ # sort_lists=False.
+ if has_list_changed(self.command, os.command, sort_lists=False):
+ differences.add('command', parameter=self.command, active=os.command)
+ if has_list_changed(self.args, os.args, sort_lists=False):
+ differences.add('args', parameter=self.args, active=os.args)
+ if has_list_changed(self.constraints, os.constraints):
+ differences.add('constraints', parameter=self.constraints, active=os.constraints)
+ if self.replicas_max_per_node is not None and self.replicas_max_per_node != os.replicas_max_per_node:
+ differences.add('replicas_max_per_node', parameter=self.replicas_max_per_node, active=os.replicas_max_per_node)
+ if has_list_changed(self.placement_preferences, os.placement_preferences, sort_lists=False):
+ differences.add('placement_preferences', parameter=self.placement_preferences, active=os.placement_preferences)
+ if has_list_changed(self.groups, os.groups):
+ differences.add('groups', parameter=self.groups, active=os.groups)
+ if self.labels is not None and self.labels != (os.labels or {}):
+ differences.add('labels', parameter=self.labels, active=os.labels)
+ if self.limit_cpu is not None and self.limit_cpu != os.limit_cpu:
+ differences.add('limit_cpu', parameter=self.limit_cpu, active=os.limit_cpu)
+ if self.limit_memory is not None and self.limit_memory != os.limit_memory:
+ differences.add('limit_memory', parameter=self.limit_memory, active=os.limit_memory)
+ if self.reserve_cpu is not None and self.reserve_cpu != os.reserve_cpu:
+ differences.add('reserve_cpu', parameter=self.reserve_cpu, active=os.reserve_cpu)
+ if self.reserve_memory is not None and self.reserve_memory != os.reserve_memory:
+ differences.add('reserve_memory', parameter=self.reserve_memory, active=os.reserve_memory)
+ if self.container_labels is not None and self.container_labels != (os.container_labels or {}):
+ differences.add('container_labels', parameter=self.container_labels, active=os.container_labels)
+ if self.stop_signal is not None and self.stop_signal != os.stop_signal:
+ differences.add('stop_signal', parameter=self.stop_signal, active=os.stop_signal)
+ if self.stop_grace_period is not None and self.stop_grace_period != os.stop_grace_period:
+ differences.add('stop_grace_period', parameter=self.stop_grace_period, active=os.stop_grace_period)
+ if self.has_publish_changed(os.publish):
+ differences.add('publish', parameter=self.publish, active=os.publish)
+ if self.read_only is not None and self.read_only != os.read_only:
+ differences.add('read_only', parameter=self.read_only, active=os.read_only)
+ if self.restart_policy is not None and self.restart_policy != os.restart_policy:
+ differences.add('restart_policy', parameter=self.restart_policy, active=os.restart_policy)
+ if self.restart_policy_attempts is not None and self.restart_policy_attempts != os.restart_policy_attempts:
+ differences.add('restart_policy_attempts', parameter=self.restart_policy_attempts, active=os.restart_policy_attempts)
+ if self.restart_policy_delay is not None and self.restart_policy_delay != os.restart_policy_delay:
+ differences.add('restart_policy_delay', parameter=self.restart_policy_delay, active=os.restart_policy_delay)
+ if self.restart_policy_window is not None and self.restart_policy_window != os.restart_policy_window:
+ differences.add('restart_policy_window', parameter=self.restart_policy_window, active=os.restart_policy_window)
+ if has_dict_changed(self.rollback_config, os.rollback_config):
+ differences.add('rollback_config', parameter=self.rollback_config, active=os.rollback_config)
+ if self.update_delay is not None and self.update_delay != os.update_delay:
+ differences.add('update_delay', parameter=self.update_delay, active=os.update_delay)
+ if self.update_parallelism is not None and self.update_parallelism != os.update_parallelism:
+ differences.add('update_parallelism', parameter=self.update_parallelism, active=os.update_parallelism)
+ if self.update_failure_action is not None and self.update_failure_action != os.update_failure_action:
+ differences.add('update_failure_action', parameter=self.update_failure_action, active=os.update_failure_action)
+ if self.update_monitor is not None and self.update_monitor != os.update_monitor:
+ differences.add('update_monitor', parameter=self.update_monitor, active=os.update_monitor)
+ if self.update_max_failure_ratio is not None and self.update_max_failure_ratio != os.update_max_failure_ratio:
+ differences.add('update_max_failure_ratio', parameter=self.update_max_failure_ratio, active=os.update_max_failure_ratio)
+ if self.update_order is not None and self.update_order != os.update_order:
+ differences.add('update_order', parameter=self.update_order, active=os.update_order)
+ has_image_changed, change = self.has_image_changed(os.image)
+ if has_image_changed:
+ differences.add('image', parameter=self.image, active=change)
+ if self.user and self.user != os.user:
+ differences.add('user', parameter=self.user, active=os.user)
+ if has_list_changed(self.dns, os.dns, sort_lists=False):
+ differences.add('dns', parameter=self.dns, active=os.dns)
+ if has_list_changed(self.dns_search, os.dns_search, sort_lists=False):
+ differences.add('dns_search', parameter=self.dns_search, active=os.dns_search)
+ if has_list_changed(self.dns_options, os.dns_options):
+ differences.add('dns_options', parameter=self.dns_options, active=os.dns_options)
+ if self.has_healthcheck_changed(os):
+ differences.add('healthcheck', parameter=self.healthcheck, active=os.healthcheck)
+ if self.hostname is not None and self.hostname != os.hostname:
+ differences.add('hostname', parameter=self.hostname, active=os.hostname)
+ if self.hosts is not None and self.hosts != (os.hosts or {}):
+ differences.add('hosts', parameter=self.hosts, active=os.hosts)
+ if self.tty is not None and self.tty != os.tty:
+ differences.add('tty', parameter=self.tty, active=os.tty)
+ if self.working_dir is not None and self.working_dir != os.working_dir:
+ differences.add('working_dir', parameter=self.working_dir, active=os.working_dir)
+ if self.force_update:
+ force_update = True
+ if self.init is not None and self.init != os.init:
+ differences.add('init', parameter=self.init, active=os.init)
+ if has_list_changed(self.cap_add, os.cap_add):
+ differences.add('cap_add', parameter=self.cap_add, active=os.cap_add)
+ if has_list_changed(self.cap_drop, os.cap_drop):
+ differences.add('cap_drop', parameter=self.cap_drop, active=os.cap_drop)
+ return not differences.empty or force_update, differences, needs_rebuild, force_update
+
+ def has_healthcheck_changed(self, old_publish):
+ # Return True when the desired healthcheck differs from the active
+ # one. NOTE(review): the parameter name is misleading — it receives
+ # the old *service* object (compare passes 'os'), not publish data.
+ # No healthcheck requested and not explicitly disabled -> unmanaged.
+ if self.healthcheck_disabled is False and self.healthcheck is None:
+ return False
+ if self.healthcheck_disabled:
+ # Disabling is a no-op when there is no active healthcheck, or
+ # when it is already the explicit ['NONE'] sentinel.
+ if old_publish.healthcheck is None:
+ return False
+ if old_publish.healthcheck.get('test') == ['NONE']:
+ return False
+ return self.healthcheck != old_publish.healthcheck
+
+ def has_publish_changed(self, old_publish):
+ # Order-insensitive comparison of published-port lists. When the
+ # desired item leaves 'mode' unset, 'mode' is ignored on both sides
+ # so a daemon-defaulted mode does not register as a change.
+ if self.publish is None:
+ return False
+ old_publish = old_publish or []
+ if len(self.publish) != len(old_publish):
+ return True
+
+ def publish_sorter(item):
+ # Stable sort key tolerant of missing values.
+ return (item.get('published_port') or 0, item.get('target_port') or 0, item.get('protocol') or '')
+
+ publish = sorted(self.publish, key=publish_sorter)
+ old_publish = sorted(old_publish, key=publish_sorter)
+ for publish_item, old_publish_item in zip(publish, old_publish):
+ ignored_keys = set()
+ if not publish_item.get('mode'):
+ ignored_keys.add('mode')
+ # Create copies of publish_item dicts where keys specified in ignored_keys are left out
+ filtered_old_publish_item = dict(
+ (k, v) for k, v in old_publish_item.items() if k not in ignored_keys
+ )
+ filtered_publish_item = dict(
+ (k, v) for k, v in publish_item.items() if k not in ignored_keys
+ )
+ if filtered_publish_item != filtered_old_publish_item:
+ return True
+ return False
+
+ def has_image_changed(self, old_image):
+ # Compare desired vs. active image. When the desired image carries
+ # no '@digest', the digest is also stripped from the active image so
+ # only name:tag is compared. Returns (changed, active_image_compared).
+ if '@' not in self.image:
+ old_image = old_image.split('@')[0]
+ return self.image != old_image, old_image
+
+ def build_container_spec(self):
+ # Translate this service's container-level attributes into a
+ # docker.types.ContainerSpec. Throughout: only non-None attributes
+ # are passed so docker-py / daemon defaults apply otherwise.
+ mounts = None
+ if self.mounts is not None:
+ mounts = []
+ for mount_config in self.mounts:
+ # Internal key -> docker.types.Mount keyword-argument name.
+ mount_options = {
+ 'target': 'target',
+ 'source': 'source',
+ 'type': 'type',
+ 'readonly': 'read_only',
+ 'propagation': 'propagation',
+ 'labels': 'labels',
+ 'no_copy': 'no_copy',
+ 'driver_config': 'driver_config',
+ 'tmpfs_size': 'tmpfs_size',
+ 'tmpfs_mode': 'tmpfs_mode'
+ }
+ mount_args = {}
+ for option, mount_arg in mount_options.items():
+ value = mount_config.get(option)
+ if value is not None:
+ mount_args[mount_arg] = value
+
+ mounts.append(types.Mount(**mount_args))
+
+ configs = None
+ if self.configs is not None:
+ configs = []
+ for config_config in self.configs:
+ config_args = {
+ 'config_id': config_config['config_id'],
+ 'config_name': config_config['config_name']
+ }
+ # Optional file attributes are added only when truthy.
+ filename = config_config.get('filename')
+ if filename:
+ config_args['filename'] = filename
+ uid = config_config.get('uid')
+ if uid:
+ config_args['uid'] = uid
+ gid = config_config.get('gid')
+ if gid:
+ config_args['gid'] = gid
+ mode = config_config.get('mode')
+ if mode:
+ config_args['mode'] = mode
+
+ configs.append(types.ConfigReference(**config_args))
+
+ secrets = None
+ if self.secrets is not None:
+ secrets = []
+ for secret_config in self.secrets:
+ # Mirrors the config handling above, for secrets.
+ secret_args = {
+ 'secret_id': secret_config['secret_id'],
+ 'secret_name': secret_config['secret_name']
+ }
+ filename = secret_config.get('filename')
+ if filename:
+ secret_args['filename'] = filename
+ uid = secret_config.get('uid')
+ if uid:
+ secret_args['uid'] = uid
+ gid = secret_config.get('gid')
+ if gid:
+ secret_args['gid'] = gid
+ mode = secret_config.get('mode')
+ if mode:
+ secret_args['mode'] = mode
+
+ secrets.append(types.SecretReference(**secret_args))
+
+ dns_config_args = {}
+ if self.dns is not None:
+ dns_config_args['nameservers'] = self.dns
+ if self.dns_search is not None:
+ dns_config_args['search'] = self.dns_search
+ if self.dns_options is not None:
+ dns_config_args['options'] = self.dns_options
+ dns_config = types.DNSConfig(**dns_config_args) if dns_config_args else None
+
+ container_spec_args = {}
+ if self.command is not None:
+ container_spec_args['command'] = self.command
+ if self.args is not None:
+ container_spec_args['args'] = self.args
+ if self.env is not None:
+ container_spec_args['env'] = self.env
+ if self.user is not None:
+ container_spec_args['user'] = self.user
+ if self.container_labels is not None:
+ container_spec_args['labels'] = self.container_labels
+ if self.healthcheck is not None:
+ container_spec_args['healthcheck'] = types.Healthcheck(**self.healthcheck)
+ elif self.healthcheck_disabled:
+ # Explicit disable: Docker's sentinel test ['NONE'].
+ container_spec_args['healthcheck'] = types.Healthcheck(test=['NONE'])
+ if self.hostname is not None:
+ container_spec_args['hostname'] = self.hostname
+ if self.hosts is not None:
+ container_spec_args['hosts'] = self.hosts
+ if self.read_only is not None:
+ container_spec_args['read_only'] = self.read_only
+ if self.stop_grace_period is not None:
+ container_spec_args['stop_grace_period'] = self.stop_grace_period
+ if self.stop_signal is not None:
+ container_spec_args['stop_signal'] = self.stop_signal
+ if self.tty is not None:
+ container_spec_args['tty'] = self.tty
+ if self.groups is not None:
+ container_spec_args['groups'] = self.groups
+ if self.working_dir is not None:
+ container_spec_args['workdir'] = self.working_dir
+ if secrets is not None:
+ container_spec_args['secrets'] = secrets
+ if mounts is not None:
+ container_spec_args['mounts'] = mounts
+ if dns_config is not None:
+ container_spec_args['dns_config'] = dns_config
+ if configs is not None:
+ container_spec_args['configs'] = configs
+ if self.init is not None:
+ container_spec_args['init'] = self.init
+ if self.cap_add is not None:
+ container_spec_args['cap_add'] = self.cap_add
+ if self.cap_drop is not None:
+ container_spec_args['cap_drop'] = self.cap_drop
+
+ return types.ContainerSpec(self.image, **container_spec_args)
+
+ def build_placement(self):
+ # Build a docker.types.Placement, or None when no placement option
+ # is set. Preferences are converted from [{'spread': value}, ...]
+ # into the API's [{'Spread': {'SpreadDescriptor': value}}, ...] form.
+ placement_args = {}
+ if self.constraints is not None:
+ placement_args['constraints'] = self.constraints
+ if self.replicas_max_per_node is not None:
+ placement_args['maxreplicas'] = self.replicas_max_per_node
+ if self.placement_preferences is not None:
+ placement_args['preferences'] = [
+ {key.title(): {'SpreadDescriptor': value}}
+ for preference in self.placement_preferences
+ for key, value in preference.items()
+ ]
+ return types.Placement(**placement_args) if placement_args else None
+
+ def build_update_config(self):
+ # Build a docker.types.UpdateConfig from the update_* attributes,
+ # or None when none of them are set.
+ update_config_args = {}
+ if self.update_parallelism is not None:
+ update_config_args['parallelism'] = self.update_parallelism
+ if self.update_delay is not None:
+ update_config_args['delay'] = self.update_delay
+ if self.update_failure_action is not None:
+ update_config_args['failure_action'] = self.update_failure_action
+ if self.update_monitor is not None:
+ update_config_args['monitor'] = self.update_monitor
+ if self.update_max_failure_ratio is not None:
+ update_config_args['max_failure_ratio'] = self.update_max_failure_ratio
+ if self.update_order is not None:
+ update_config_args['order'] = self.update_order
+ return types.UpdateConfig(**update_config_args) if update_config_args else None
+
+ def build_log_driver(self):
+ # Build a docker.types.DriverConfig for logging, or None when
+ # neither driver name nor options are set.
+ log_driver_args = {}
+ if self.log_driver is not None:
+ log_driver_args['name'] = self.log_driver
+ if self.log_driver_options is not None:
+ log_driver_args['options'] = self.log_driver_options
+ return types.DriverConfig(**log_driver_args) if log_driver_args else None
+
+ def build_restart_policy(self):
+ # Build a docker.types.RestartPolicy from the restart_policy_*
+ # attributes, or None when none are set.
+ restart_policy_args = {}
+ if self.restart_policy is not None:
+ restart_policy_args['condition'] = self.restart_policy
+ if self.restart_policy_delay is not None:
+ restart_policy_args['delay'] = self.restart_policy_delay
+ if self.restart_policy_attempts is not None:
+ restart_policy_args['max_attempts'] = self.restart_policy_attempts
+ if self.restart_policy_window is not None:
+ restart_policy_args['window'] = self.restart_policy_window
+ return types.RestartPolicy(**restart_policy_args) if restart_policy_args else None
+
+ def build_rollback_config(self):
+ # Build a docker.types.RollbackConfig from the rollback_config dict,
+ # passing through only the non-None keys; returns None when the
+ # option is unset or every key is None.
+ if self.rollback_config is None:
+ return None
+ rollback_config_options = [
+ 'parallelism',
+ 'delay',
+ 'failure_action',
+ 'monitor',
+ 'max_failure_ratio',
+ 'order',
+ ]
+ rollback_config_args = {}
+ for option in rollback_config_options:
+ value = self.rollback_config.get(option)
+ if value is not None:
+ rollback_config_args[option] = value
+ return types.RollbackConfig(**rollback_config_args) if rollback_config_args else None
+
+ def build_resources(self):
+ # Build a docker.types.Resources, or None when no limit/reservation
+ # is set. CPU values are fractional CPUs and are converted to the
+ # API's NanoCPUs unit (1 CPU = 1e9).
+ resources_args = {}
+ if self.limit_cpu is not None:
+ resources_args['cpu_limit'] = int(self.limit_cpu * 1000000000.0)
+ if self.limit_memory is not None:
+ resources_args['mem_limit'] = self.limit_memory
+ if self.reserve_cpu is not None:
+ resources_args['cpu_reservation'] = int(self.reserve_cpu * 1000000000.0)
+ if self.reserve_memory is not None:
+ resources_args['mem_reservation'] = self.reserve_memory
+ return types.Resources(**resources_args) if resources_args else None
+
+ def build_task_template(self, container_spec, placement=None):
+ # Assemble the docker.types.TaskTemplate around the given
+ # container_spec and optional placement, attaching log driver,
+ # restart policy, resources, force_update counter and — when the
+ # installed docker-py/API supports it (can_use_task_template_networks)
+ # — the networks list.
+ log_driver = self.build_log_driver()
+ restart_policy = self.build_restart_policy()
+ resources = self.build_resources()
+
+ task_template_args = {}
+ if placement is not None:
+ task_template_args['placement'] = placement
+ if log_driver is not None:
+ task_template_args['log_driver'] = log_driver
+ if restart_policy is not None:
+ task_template_args['restart_policy'] = restart_policy
+ if resources is not None:
+ task_template_args['resources'] = resources
+ if self.force_update:
+ task_template_args['force_update'] = self.force_update
+ if self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ task_template_args['networks'] = networks
+ return types.TaskTemplate(container_spec=container_spec, **task_template_args)
+
+ def build_service_mode(self):
+ # Build docker.types.ServiceMode. Global mode carries no replica
+ # count, so replicas is cleared (note: this mutates self.replicas).
+ if self.mode == 'global':
+ self.replicas = None
+ return types.ServiceMode(self.mode, replicas=self.replicas)
+
+ def build_networks(self):
+ # Convert the internal networks list ({'id', 'aliases', 'options'})
+ # into the API's attachment form ({'Target', 'Aliases', 'DriverOpts'});
+ # returns None when networks are unmanaged.
+ networks = None
+ if self.networks is not None:
+ networks = []
+ for network in self.networks:
+ docker_network = {'Target': network['id']}
+ if 'aliases' in network:
+ docker_network['Aliases'] = network['aliases']
+ if 'options' in network:
+ docker_network['DriverOpts'] = network['options']
+ networks.append(docker_network)
+ return networks
+
+ def build_endpoint_spec(self):
+ # Build a docker.types.EndpointSpec (mode + published ports), or
+ # None when neither publish nor endpoint_mode is set. PublishedPort
+ # and PublishMode are omitted when falsy so the daemon assigns
+ # defaults.
+ endpoint_spec_args = {}
+ if self.publish is not None:
+ ports = []
+ for port in self.publish:
+ port_spec = {
+ 'Protocol': port['protocol'],
+ 'TargetPort': port['target_port']
+ }
+ if port.get('published_port'):
+ port_spec['PublishedPort'] = port['published_port']
+ if port.get('mode'):
+ port_spec['PublishMode'] = port['mode']
+ ports.append(port_spec)
+ endpoint_spec_args['ports'] = ports
+ if self.endpoint_mode is not None:
+ endpoint_spec_args['mode'] = self.endpoint_mode
+ return types.EndpointSpec(**endpoint_spec_args) if endpoint_spec_args else None
+
+ def build_docker_service(self):
+ # Top-level assembly: combine all build_* helpers into the kwargs
+ # dict passed to docker-py's create_service/update_service. Optional
+ # sections are included only when truthy. Networks go at the service
+ # level only when the task-template form is unavailable (older
+ # docker-py/API — see can_use_task_template_networks).
+ container_spec = self.build_container_spec()
+ placement = self.build_placement()
+ task_template = self.build_task_template(container_spec, placement)
+
+ update_config = self.build_update_config()
+ rollback_config = self.build_rollback_config()
+ service_mode = self.build_service_mode()
+ endpoint_spec = self.build_endpoint_spec()
+
+ service = {'task_template': task_template, 'mode': service_mode}
+ if update_config:
+ service['update_config'] = update_config
+ if rollback_config:
+ service['rollback_config'] = rollback_config
+ if endpoint_spec:
+ service['endpoint_spec'] = endpoint_spec
+ if self.labels:
+ service['labels'] = self.labels
+ if not self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ service['networks'] = networks
+ return service
+
+
+class DockerServiceManager(object):
+
+ def __init__(self, client):
+ # client: the docker API client wrapper used for all service
+ # inspect/create/update calls (provides inspect_service and
+ # version attributes — see get_service below).
+ self.client = client
+ # Retry budget for operations — presumably consumed by run/retry
+ # logic outside this view; TODO confirm.
+ self.retries = 2
+ # Populated later with a DifferenceTracker of applied changes.
+ self.diff_tracker = None
+
+ def get_service(self, name):
+ try:
+ raw_data = self.client.inspect_service(name)
+ except NotFound:
+ return None
+ ds = DockerService(self.client.docker_api_version, self.client.docker_py_version)
+
+ task_template_data = raw_data['Spec']['TaskTemplate']
+ ds.image = task_template_data['ContainerSpec']['Image']
+ ds.user = task_template_data['ContainerSpec'].get('User')
+ ds.env = task_template_data['ContainerSpec'].get('Env')
+ ds.command = task_template_data['ContainerSpec'].get('Command')
+ ds.args = task_template_data['ContainerSpec'].get('Args')
+ ds.groups = task_template_data['ContainerSpec'].get('Groups')
+ ds.stop_grace_period = task_template_data['ContainerSpec'].get('StopGracePeriod')
+ ds.stop_signal = task_template_data['ContainerSpec'].get('StopSignal')
+ ds.working_dir = task_template_data['ContainerSpec'].get('Dir')
+ ds.read_only = task_template_data['ContainerSpec'].get('ReadOnly')
+ ds.cap_add = task_template_data['ContainerSpec'].get('CapabilityAdd')
+ ds.cap_drop = task_template_data['ContainerSpec'].get('CapabilityDrop')
+
+ healthcheck_data = task_template_data['ContainerSpec'].get('Healthcheck')
+ if healthcheck_data:
+ options = {
+ 'Test': 'test',
+ 'Interval': 'interval',
+ 'Timeout': 'timeout',
+ 'StartPeriod': 'start_period',
+ 'Retries': 'retries'
+ }
+ healthcheck = dict(
+ (options[key], value) for key, value in healthcheck_data.items()
+ if value is not None and key in options
+ )
+ ds.healthcheck = healthcheck
+
+ update_config_data = raw_data['Spec'].get('UpdateConfig')
+ if update_config_data:
+ ds.update_delay = update_config_data.get('Delay')
+ ds.update_parallelism = update_config_data.get('Parallelism')
+ ds.update_failure_action = update_config_data.get('FailureAction')
+ ds.update_monitor = update_config_data.get('Monitor')
+ ds.update_max_failure_ratio = update_config_data.get('MaxFailureRatio')
+ ds.update_order = update_config_data.get('Order')
+
+ rollback_config_data = raw_data['Spec'].get('RollbackConfig')
+ if rollback_config_data:
+ ds.rollback_config = {
+ 'parallelism': rollback_config_data.get('Parallelism'),
+ 'delay': rollback_config_data.get('Delay'),
+ 'failure_action': rollback_config_data.get('FailureAction'),
+ 'monitor': rollback_config_data.get('Monitor'),
+ 'max_failure_ratio': rollback_config_data.get('MaxFailureRatio'),
+ 'order': rollback_config_data.get('Order'),
+ }
+
+ dns_config = task_template_data['ContainerSpec'].get('DNSConfig')
+ if dns_config:
+ ds.dns = dns_config.get('Nameservers')
+ ds.dns_search = dns_config.get('Search')
+ ds.dns_options = dns_config.get('Options')
+
+ ds.hostname = task_template_data['ContainerSpec'].get('Hostname')
+
+ hosts = task_template_data['ContainerSpec'].get('Hosts')
+ if hosts:
+ hosts = [
+ list(reversed(host.split(":", 1)))
+ if ":" in host
+ else host.split(" ", 1)
+ for host in hosts
+ ]
+ ds.hosts = dict((hostname, ip) for ip, hostname in hosts)
+ ds.tty = task_template_data['ContainerSpec'].get('TTY')
+
+ placement = task_template_data.get('Placement')
+ if placement:
+ ds.constraints = placement.get('Constraints')
+ ds.replicas_max_per_node = placement.get('MaxReplicas')
+ placement_preferences = []
+ for preference in placement.get('Preferences', []):
+ placement_preferences.append(
+ dict(
+ (key.lower(), value['SpreadDescriptor'])
+ for key, value in preference.items()
+ )
+ )
+ ds.placement_preferences = placement_preferences or None
+
+ restart_policy_data = task_template_data.get('RestartPolicy')
+ if restart_policy_data:
+ ds.restart_policy = restart_policy_data.get('Condition')
+ ds.restart_policy_delay = restart_policy_data.get('Delay')
+ ds.restart_policy_attempts = restart_policy_data.get('MaxAttempts')
+ ds.restart_policy_window = restart_policy_data.get('Window')
+
+ raw_data_endpoint_spec = raw_data['Spec'].get('EndpointSpec')
+ if raw_data_endpoint_spec:
+ ds.endpoint_mode = raw_data_endpoint_spec.get('Mode')
+ raw_data_ports = raw_data_endpoint_spec.get('Ports')
+ if raw_data_ports:
+ ds.publish = []
+ for port in raw_data_ports:
+ ds.publish.append({
+ 'protocol': port['Protocol'],
+ 'mode': port.get('PublishMode', None),
+ 'published_port': port.get('PublishedPort', None),
+ 'target_port': int(port['TargetPort'])
+ })
+
+ raw_data_limits = task_template_data.get('Resources', {}).get('Limits')
+ if raw_data_limits:
+ raw_cpu_limits = raw_data_limits.get('NanoCPUs')
+ if raw_cpu_limits:
+ ds.limit_cpu = float(raw_cpu_limits) / 1000000000
+
+ raw_memory_limits = raw_data_limits.get('MemoryBytes')
+ if raw_memory_limits:
+ ds.limit_memory = int(raw_memory_limits)
+
+ raw_data_reservations = task_template_data.get('Resources', {}).get('Reservations')
+ if raw_data_reservations:
+ raw_cpu_reservations = raw_data_reservations.get('NanoCPUs')
+ if raw_cpu_reservations:
+ ds.reserve_cpu = float(raw_cpu_reservations) / 1000000000
+
+ raw_memory_reservations = raw_data_reservations.get('MemoryBytes')
+ if raw_memory_reservations:
+ ds.reserve_memory = int(raw_memory_reservations)
+
+ ds.labels = raw_data['Spec'].get('Labels')
+ ds.log_driver = task_template_data.get('LogDriver', {}).get('Name')
+ ds.log_driver_options = task_template_data.get('LogDriver', {}).get('Options')
+ ds.container_labels = task_template_data['ContainerSpec'].get('Labels')
+
+ mode = raw_data['Spec']['Mode']
+ if 'Replicated' in mode.keys():
+ ds.mode = to_text('replicated', encoding='utf-8')
+ ds.replicas = mode['Replicated']['Replicas']
+ elif 'Global' in mode.keys():
+ ds.mode = 'global'
+ else:
+ raise Exception('Unknown service mode: %s' % mode)
+
+ raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts')
+ if raw_data_mounts:
+ ds.mounts = []
+ for mount_data in raw_data_mounts:
+ bind_options = mount_data.get('BindOptions', {})
+ volume_options = mount_data.get('VolumeOptions', {})
+ tmpfs_options = mount_data.get('TmpfsOptions', {})
+ driver_config = volume_options.get('DriverConfig', {})
+ driver_config = dict(
+ (key.lower(), value) for key, value in driver_config.items()
+ ) or None
+ ds.mounts.append({
+ 'source': mount_data.get('Source', ''),
+ 'type': mount_data['Type'],
+ 'target': mount_data['Target'],
+ 'readonly': mount_data.get('ReadOnly'),
+ 'propagation': bind_options.get('Propagation'),
+ 'no_copy': volume_options.get('NoCopy'),
+ 'labels': volume_options.get('Labels'),
+ 'driver_config': driver_config,
+ 'tmpfs_mode': tmpfs_options.get('Mode'),
+ 'tmpfs_size': tmpfs_options.get('SizeBytes'),
+ })
+
+ raw_data_configs = task_template_data['ContainerSpec'].get('Configs')
+ if raw_data_configs:
+ ds.configs = []
+ for config_data in raw_data_configs:
+ ds.configs.append({
+ 'config_id': config_data['ConfigID'],
+ 'config_name': config_data['ConfigName'],
+ 'filename': config_data['File'].get('Name'),
+ 'uid': config_data['File'].get('UID'),
+ 'gid': config_data['File'].get('GID'),
+ 'mode': config_data['File'].get('Mode')
+ })
+
+ raw_data_secrets = task_template_data['ContainerSpec'].get('Secrets')
+ if raw_data_secrets:
+ ds.secrets = []
+ for secret_data in raw_data_secrets:
+ ds.secrets.append({
+ 'secret_id': secret_data['SecretID'],
+ 'secret_name': secret_data['SecretName'],
+ 'filename': secret_data['File'].get('Name'),
+ 'uid': secret_data['File'].get('UID'),
+ 'gid': secret_data['File'].get('GID'),
+ 'mode': secret_data['File'].get('Mode')
+ })
+
+ raw_networks_data = task_template_data.get('Networks', raw_data['Spec'].get('Networks'))
+ if raw_networks_data:
+ ds.networks = []
+ for network_data in raw_networks_data:
+ network = {'id': network_data['Target']}
+ if 'Aliases' in network_data:
+ network['aliases'] = network_data['Aliases']
+ if 'DriverOpts' in network_data:
+ network['options'] = network_data['DriverOpts']
+ ds.networks.append(network)
+ ds.service_version = raw_data['Version']['Index']
+ ds.service_id = raw_data['ID']
+
+ ds.init = task_template_data['ContainerSpec'].get('Init', False)
+ return ds
+
    def update_service(self, name, old_service, new_service):
        """Update an existing swarm service in place.

        :param name: name of the service to update.
        :param old_service: DockerService describing the currently deployed
            service; supplies the service ID and version index the Docker API
            requires for optimistic locking of updates.
        :param new_service: DockerService holding the desired configuration.
        """
        service_data = new_service.build_docker_service()
        result = self.client.update_service(
            old_service.service_id,
            old_service.service_version,
            name=name,
            **service_data
        )
        # Prior to Docker SDK 4.0.0 no warnings were returned and will thus be ignored.
        # (see https://github.com/docker/docker-py/pull/2272)
        self.client.report_warnings(result, ['Warning'])
+
    def create_service(self, name, service):
        """Create a new swarm service from the given DockerService definition."""
        service_data = service.build_docker_service()
        result = self.client.create_service(name=name, **service_data)
        # Surface any warnings the daemon attached to the create response.
        self.client.report_warnings(result, ['Warning'])
+
    def remove_service(self, name):
        """Remove the swarm service with the given name."""
        self.client.remove_service(name)
+
    def get_image_digest(self, name, resolve=False):
        """Resolve an image name to its digest-pinned form.

        Returns ``name`` unchanged when it is empty or ``resolve`` is False.
        Otherwise normalizes the tag (defaulting to ``latest``) and appends
        the digest reported by the registry, i.e. ``repo:tag@<digest>``.
        """
        if (
            not name
            or not resolve
        ):
            return name
        repo, tag = parse_repository_tag(name)
        if not tag:
            tag = 'latest'
        name = repo + ':' + tag
        # inspect_distribution queries the registry for the image descriptor.
        distribution_data = self.client.inspect_distribution(name)
        digest = distribution_data['Descriptor']['digest']
        return '%s@%s' % (name, digest)
+
    def get_networks_names_ids(self):
        """Return a mapping of network name -> network ID for all known networks."""
        return dict(
            (network['Name'], network['Id']) for network in self.client.networks()
        )
+
    def get_missing_secret_ids(self):
        """
        Resolve missing secret ids by looking them up by name
        """
        # Only secrets whose ID was not supplied by the user need resolving.
        secret_names = [
            secret['secret_name']
            for secret in self.client.module.params.get('secrets') or []
            if secret['secret_id'] is None
        ]
        if not secret_names:
            return {}
        secrets = self.client.secrets(filters={'name': secret_names})
        # Narrow the daemon's filter result down to exact name matches.
        secrets = dict(
            (secret['Spec']['Name'], secret['ID'])
            for secret in secrets
            if secret['Spec']['Name'] in secret_names
        )
        # Fail early if any requested secret does not exist in the swarm.
        for secret_name in secret_names:
            if secret_name not in secrets:
                self.client.fail(
                    'Could not find a secret named "%s"' % secret_name
                )
        return secrets
+
    def get_missing_config_ids(self):
        """
        Resolve missing config ids by looking them up by name
        """
        # Only configs whose ID was not supplied by the user need resolving.
        config_names = [
            config['config_name']
            for config in self.client.module.params.get('configs') or []
            if config['config_id'] is None
        ]
        if not config_names:
            return {}
        configs = self.client.configs(filters={'name': config_names})
        # Narrow the daemon's filter result down to exact name matches.
        configs = dict(
            (config['Spec']['Name'], config['ID'])
            for config in configs
            if config['Spec']['Name'] in config_names
        )
        # Fail early if any requested config does not exist in the swarm.
        for config_name in config_names:
            if config_name not in configs:
                self.client.fail(
                    'Could not find a config named "%s"' % config_name
                )
        return configs
+
    def run(self):
        """Converge the service towards the requested state.

        Returns a tuple ``(msg, changed, rebuilt, diffs, facts)`` describing
        what was done — or, in check mode, what would have been done.
        """
        self.diff_tracker = DifferenceTracker()
        module = self.client.module

        image = module.params['image']
        try:
            image_digest = self.get_image_digest(
                name=image,
                resolve=module.params['resolve_image']
            )
        except DockerException as e:
            self.client.fail(
                'Error looking for an image named %s: %s'
                % (image, to_native(e))
            )

        try:
            current_service = self.get_service(module.params['name'])
        except Exception as e:
            self.client.fail(
                'Error looking for service named %s: %s'
                % (module.params['name'], to_native(e))
            )
        try:
            # Resolve names to IDs before building the desired service spec.
            secret_ids = self.get_missing_secret_ids()
            config_ids = self.get_missing_config_ids()
            network_ids = self.get_networks_names_ids()
            new_service = DockerService.from_ansible_params(
                module.params,
                current_service,
                image_digest,
                secret_ids,
                config_ids,
                network_ids,
                self.client.docker_api_version,
                self.client.docker_py_version
            )
        except Exception as e:
            return self.client.fail(
                'Error parsing module parameters: %s' % to_native(e)
            )

        changed = False
        msg = 'noop'
        rebuilt = False
        differences = DifferenceTracker()
        facts = {}

        if current_service:
            if module.params['state'] == 'absent':
                if not module.check_mode:
                    self.remove_service(module.params['name'])
                msg = 'Service removed'
                changed = True
            else:
                changed, differences, need_rebuild, force_update = new_service.compare(
                    current_service
                )
                if changed:
                    self.diff_tracker.merge(differences)
                    if need_rebuild:
                        # Some changes cannot be applied in place; remove and recreate.
                        if not module.check_mode:
                            self.remove_service(module.params['name'])
                            self.create_service(
                                module.params['name'],
                                new_service
                            )
                        msg = 'Service rebuilt'
                        rebuilt = True
                    else:
                        if not module.check_mode:
                            self.update_service(
                                module.params['name'],
                                current_service,
                                new_service
                            )
                        msg = 'Service updated'
                        rebuilt = False
                else:
                    if force_update:
                        # force_update triggers an update even without config changes.
                        if not module.check_mode:
                            self.update_service(
                                module.params['name'],
                                current_service,
                                new_service
                            )
                        msg = 'Service forcefully updated'
                        rebuilt = False
                        changed = True
                    else:
                        msg = 'Service unchanged'
                facts = new_service.get_facts()
        else:
            if module.params['state'] == 'absent':
                msg = 'Service absent'
            else:
                if not module.check_mode:
                    self.create_service(module.params['name'], new_service)
                msg = 'Service created'
                changed = True
                facts = new_service.get_facts()

        return msg, changed, rebuilt, differences.get_legacy_docker_diffs(), facts
+
    def run_safe(self):
        """Run :meth:`run`, retrying when a concurrent update invalidates the version index."""
        while True:
            try:
                return self.run()
            except APIError as e:
                # Sometimes Version.Index will have changed between an inspect and
                # update. If this is encountered we'll retry the update.
                if self.retries > 0 and 'update out of sequence' in str(e.explanation):
                    self.retries -= 1
                    time.sleep(1)
                else:
                    raise
+
+
+def _detect_publish_mode_usage(client):
+ for publish_def in client.module.params['publish'] or []:
+ if publish_def.get('mode'):
+ return True
+ return False
+
+
+def _detect_healthcheck_start_period(client):
+ if client.module.params['healthcheck']:
+ return client.module.params['healthcheck']['start_period'] is not None
+ return False
+
+
+def _detect_mount_tmpfs_usage(client):
+ for mount in client.module.params['mounts'] or []:
+ if mount.get('type') == 'tmpfs':
+ return True
+ if mount.get('tmpfs_size') is not None:
+ return True
+ if mount.get('tmpfs_mode') is not None:
+ return True
+ return False
+
+
+def _detect_update_config_failure_action_rollback(client):
+ rollback_config_failure_action = (
+ (client.module.params['update_config'] or {}).get('failure_action')
+ )
+ return rollback_config_failure_action == 'rollback'
+
+
def main():
    """Entry point: build the argument spec, create the client, run the manager.

    option_minimal_versions maps each module option to the minimum Docker SDK
    for Python and/or Docker API version that supports it; entries with a
    detect_usage callback are only version-checked when actually used.
    """
    argument_spec = dict(
        name=dict(type='str', required=True),
        image=dict(type='str'),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        mounts=dict(type='list', elements='dict', options=dict(
            source=dict(type='str'),
            target=dict(type='str', required=True),
            type=dict(
                type='str',
                default='bind',
                choices=['bind', 'volume', 'tmpfs', 'npipe'],
            ),
            readonly=dict(type='bool'),
            labels=dict(type='dict'),
            propagation=dict(
                type='str',
                choices=[
                    'shared',
                    'slave',
                    'private',
                    'rshared',
                    'rslave',
                    'rprivate'
                ]
            ),
            no_copy=dict(type='bool'),
            driver_config=dict(type='dict', options=dict(
                name=dict(type='str'),
                options=dict(type='dict')
            )),
            tmpfs_size=dict(type='str'),
            tmpfs_mode=dict(type='int')
        )),
        configs=dict(type='list', elements='dict', options=dict(
            config_id=dict(type='str'),
            config_name=dict(type='str', required=True),
            filename=dict(type='str'),
            uid=dict(type='str'),
            gid=dict(type='str'),
            mode=dict(type='int'),
        )),
        secrets=dict(type='list', elements='dict', no_log=False, options=dict(
            secret_id=dict(type='str', no_log=False),
            secret_name=dict(type='str', required=True, no_log=False),
            filename=dict(type='str'),
            uid=dict(type='str'),
            gid=dict(type='str'),
            mode=dict(type='int'),
        )),
        networks=dict(type='list', elements='raw'),
        command=dict(type='raw'),
        args=dict(type='list', elements='str'),
        env=dict(type='raw'),
        env_files=dict(type='list', elements='path'),
        force_update=dict(type='bool', default=False),
        groups=dict(type='list', elements='str'),
        logging=dict(type='dict', options=dict(
            driver=dict(type='str'),
            options=dict(type='dict'),
        )),
        publish=dict(type='list', elements='dict', options=dict(
            published_port=dict(type='int', required=False),
            target_port=dict(type='int', required=True),
            protocol=dict(type='str', default='tcp', choices=['tcp', 'udp']),
            mode=dict(type='str', choices=['ingress', 'host']),
        )),
        placement=dict(type='dict', options=dict(
            constraints=dict(type='list', elements='str'),
            preferences=dict(type='list', elements='dict'),
            replicas_max_per_node=dict(type='int'),
        )),
        tty=dict(type='bool'),
        dns=dict(type='list', elements='str'),
        dns_search=dict(type='list', elements='str'),
        dns_options=dict(type='list', elements='str'),
        healthcheck=dict(type='dict', options=dict(
            test=dict(type='raw'),
            interval=dict(type='str'),
            timeout=dict(type='str'),
            start_period=dict(type='str'),
            retries=dict(type='int'),
        )),
        hostname=dict(type='str'),
        hosts=dict(type='dict'),
        labels=dict(type='dict'),
        container_labels=dict(type='dict'),
        mode=dict(
            type='str',
            default='replicated',
            choices=['replicated', 'global']
        ),
        replicas=dict(type='int', default=-1),
        endpoint_mode=dict(type='str', choices=['vip', 'dnsrr']),
        stop_grace_period=dict(type='str'),
        stop_signal=dict(type='str'),
        limits=dict(type='dict', options=dict(
            cpus=dict(type='float'),
            memory=dict(type='str'),
        )),
        read_only=dict(type='bool'),
        reservations=dict(type='dict', options=dict(
            cpus=dict(type='float'),
            memory=dict(type='str'),
        )),
        resolve_image=dict(type='bool', default=False),
        restart_config=dict(type='dict', options=dict(
            condition=dict(type='str', choices=['none', 'on-failure', 'any']),
            delay=dict(type='str'),
            max_attempts=dict(type='int'),
            window=dict(type='str'),
        )),
        rollback_config=dict(type='dict', options=dict(
            parallelism=dict(type='int'),
            delay=dict(type='str'),
            failure_action=dict(
                type='str',
                choices=['continue', 'pause']
            ),
            monitor=dict(type='str'),
            max_failure_ratio=dict(type='float'),
            order=dict(type='str'),
        )),
        update_config=dict(type='dict', options=dict(
            parallelism=dict(type='int'),
            delay=dict(type='str'),
            failure_action=dict(
                type='str',
                choices=['continue', 'pause', 'rollback']
            ),
            monitor=dict(type='str'),
            max_failure_ratio=dict(type='float'),
            order=dict(type='str'),
        )),
        user=dict(type='str'),
        working_dir=dict(type='str'),
        init=dict(type='bool'),
        cap_add=dict(type='list', elements='str'),
        cap_drop=dict(type='list', elements='str'),
    )

    option_minimal_versions = dict(
        dns=dict(docker_py_version='2.6.0'),
        dns_options=dict(docker_py_version='2.6.0'),
        dns_search=dict(docker_py_version='2.6.0'),
        endpoint_mode=dict(docker_py_version='3.0.0'),
        force_update=dict(docker_py_version='2.1.0'),
        healthcheck=dict(docker_py_version='2.6.0'),
        hostname=dict(docker_py_version='2.2.0'),
        hosts=dict(docker_py_version='2.6.0'),
        groups=dict(docker_py_version='2.6.0'),
        tty=dict(docker_py_version='2.4.0'),
        secrets=dict(docker_py_version='2.4.0'),
        configs=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
        stop_signal=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
        publish=dict(docker_py_version='3.0.0'),
        read_only=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
        resolve_image=dict(docker_api_version='1.30', docker_py_version='3.2.0'),
        rollback_config=dict(docker_py_version='3.5.0', docker_api_version='1.28'),
        init=dict(docker_py_version='4.0.0', docker_api_version='1.37'),
        cap_add=dict(docker_py_version='5.0.3', docker_api_version='1.41'),
        cap_drop=dict(docker_py_version='5.0.3', docker_api_version='1.41'),
        # specials
        publish_mode=dict(
            docker_py_version='3.0.0',
            detect_usage=_detect_publish_mode_usage,
            usage_msg='set publish.mode'
        ),
        healthcheck_start_period=dict(
            docker_py_version='2.6.0',
            docker_api_version='1.29',
            detect_usage=_detect_healthcheck_start_period,
            usage_msg='set healthcheck.start_period'
        ),
        update_config_max_failure_ratio=dict(
            docker_py_version='2.1.0',
            detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
                'max_failure_ratio'
            ) is not None,
            usage_msg='set update_config.max_failure_ratio'
        ),
        update_config_failure_action=dict(
            docker_py_version='3.5.0',
            docker_api_version='1.28',
            detect_usage=_detect_update_config_failure_action_rollback,
            usage_msg='set update_config.failure_action.rollback'
        ),
        update_config_monitor=dict(
            docker_py_version='2.1.0',
            detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
                'monitor'
            ) is not None,
            usage_msg='set update_config.monitor'
        ),
        update_config_order=dict(
            docker_py_version='2.7.0',
            docker_api_version='1.29',
            detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
                'order'
            ) is not None,
            usage_msg='set update_config.order'
        ),
        placement_config_preferences=dict(
            docker_py_version='2.4.0',
            docker_api_version='1.27',
            detect_usage=lambda c: (c.module.params['placement'] or {}).get(
                'preferences'
            ) is not None,
            usage_msg='set placement.preferences'
        ),
        placement_config_constraints=dict(
            docker_py_version='2.4.0',
            detect_usage=lambda c: (c.module.params['placement'] or {}).get(
                'constraints'
            ) is not None,
            usage_msg='set placement.constraints'
        ),
        placement_config_replicas_max_per_node=dict(
            docker_py_version='4.4.3',
            docker_api_version='1.40',
            detect_usage=lambda c: (c.module.params['placement'] or {}).get(
                'replicas_max_per_node'
            ) is not None,
            usage_msg='set placement.replicas_max_per_node'
        ),
        mounts_tmpfs=dict(
            docker_py_version='2.6.0',
            detect_usage=_detect_mount_tmpfs_usage,
            usage_msg='set mounts.tmpfs'
        ),
        rollback_config_order=dict(
            docker_api_version='1.29',
            detect_usage=lambda c: (c.module.params['rollback_config'] or {}).get(
                'order'
            ) is not None,
            usage_msg='set rollback_config.order'
        ),
    )
    required_if = [
        ('state', 'present', ['image'])
    ]

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        required_if=required_if,
        supports_check_mode=True,
        min_docker_version='2.0.2',
        option_minimal_versions=option_minimal_versions,
    )

    try:
        dsm = DockerServiceManager(client)
        msg, changed, rebuilt, changes, facts = dsm.run_safe()

        results = dict(
            msg=msg,
            changed=changed,
            rebuilt=rebuilt,
            changes=changes,
            swarm_service=facts,
        )
        # In diff mode, also report the before/after state tracked during the run.
        if client.module._diff:
            before, after = dsm.diff_tracker.get_before_after()
            results['diff'] = dict(before=before, after=after)

        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py b/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py
new file mode 100644
index 00000000..ebe8a8e1
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service_info
+
+short_description: Retrieves information about docker services from a Swarm Manager
+
+description:
+ - Retrieves information about a docker service.
+ - Essentially returns the output of C(docker service inspect <name>).
+ - Must be executed on a host running as Swarm Manager, otherwise the module will fail.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - The name of the service to inspect.
+ type: str
+ required: true
+
+author:
+ - Hannes Ljungberg (@hannseman)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get info from a service
+ community.docker.docker_swarm_service_info:
+ name: myservice
+ register: result
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the service exists.
+ type: bool
+ returned: always
+ sample: true
+service:
+ description:
+ - A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
+ - Will be C(none) if service does not exist.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+
def get_service_info(client):
    """Inspect the service named by the module's 'name' parameter.

    Missing services are skipped (skip_missing=True) rather than treated as
    an error, so the caller can report exists=False.
    """
    service_name = client.module.params['name']
    return client.get_service_inspect(
        service_id=service_name,
        skip_missing=True
    )
+
+
def main():
    """Entry point: inspect the named swarm service and return exists/service facts."""
    argument_spec = dict(
        name=dict(type='str', required=True),
    )

    client = AnsibleDockerSwarmClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        min_docker_version='2.0.0',
    )

    # Service inspection only works against a swarm manager node.
    client.fail_task_if_not_swarm_manager()

    try:
        service = get_service_info(client)

        client.module.exit_json(
            changed=False,
            service=service,
            exists=bool(service)
        )
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_volume.py b/ansible_collections/community/docker/plugins/modules/docker_volume.py
new file mode 100644
index 00000000..09b1d386
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_volume.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume
+short_description: Manage Docker volumes
+description:
+ - Create/remove Docker volumes.
+ - Performs largely the same function as the C(docker volume) CLI subcommand.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ volume_name:
+ description:
+ - Name of the volume to operate on.
+ type: str
+ required: true
+ aliases:
+ - name
+
+ driver:
+ description:
+ - Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
+ type: str
+ default: local
+
+ driver_options:
+ description:
+ - "Dictionary of volume settings. Consult docker docs for valid options and values:
+ U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)."
+ type: dict
+ default: {}
+
+ labels:
+ description:
+ - Dictionary of label key/values to set for the volume.
+ type: dict
+
+ recreate:
+ description:
+ - Controls when a volume will be recreated when I(state) is C(present). Please
+ note that recreating an existing volume will cause B(any data in the existing volume
+ to be lost!) The volume will be deleted and a new volume with the same name will be
+ created.
+ - The value C(always) forces the volume to be always recreated.
+ - The value C(never) makes sure the volume will not be recreated.
+ - The value C(options-changed) makes sure the volume will be recreated if the volume
+ already exists and the driver, driver options, or labels differ.
+ type: str
+ default: never
+ choices:
+ - always
+ - never
+ - options-changed
+
+ state:
+ description:
+ - C(absent) deletes the volume.
+ - C(present) creates the volume, if it does not already exist.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+author:
+ - Alex Grönholm (@agronholm)
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Create a volume
+ community.docker.docker_volume:
+ name: volume_one
+
+- name: Remove a volume
+ community.docker.docker_volume:
+ name: volume_one
+ state: absent
+
+- name: Create a volume with options
+ community.docker.docker_volume:
+ name: volume_two
+ driver_options:
+ type: btrfs
+ device: /dev/sda2
+'''
+
+RETURN = '''
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ DifferenceTracker,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import (
+ APIError,
+ DockerException,
+)
+
+
class TaskParameters(DockerBaseClass):
    """Holds the docker_volume module parameters as attributes.

    Every key in ``client.module.params`` is copied onto an attribute of the
    same name; the explicit assignments below just declare the expected ones.
    """

    def __init__(self, client):
        super(TaskParameters, self).__init__()
        self.client = client

        # Defaults; overwritten from client.module.params in the loop below.
        self.volume_name = None
        self.driver = None
        self.driver_options = None
        self.labels = None
        self.recreate = None
        self.debug = None

        for key, value in iteritems(client.module.params):
            setattr(self, key, value)
+
+
class DockerVolumeManager(object):
    """Implements the present/absent convergence logic of docker_volume.

    Running the manager happens entirely in the constructor; the outcome is
    collected in ``self.results`` for the module to return.
    """

    def __init__(self, client):
        self.client = client
        self.parameters = TaskParameters(client)
        self.check_mode = self.client.check_mode
        self.results = {
            u'changed': False,
            u'actions': []
        }
        self.diff = self.client.module._diff
        self.diff_tracker = DifferenceTracker()
        self.diff_result = dict()

        self.existing_volume = self.get_existing_volume()

        state = self.parameters.state
        if state == 'present':
            self.present()
        elif state == 'absent':
            self.absent()

        # Expose diff information in diff/check/debug mode.
        if self.diff or self.check_mode or self.parameters.debug:
            if self.diff:
                self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
            self.results['diff'] = self.diff_result

    def get_existing_volume(self):
        """Return the daemon's data for the named volume, or None if absent."""
        try:
            volumes = self.client.get_json('/volumes')
        except APIError as e:
            self.client.fail(to_native(e))

        if volumes[u'Volumes'] is None:
            return None

        for volume in volumes[u'Volumes']:
            if volume['Name'] == self.parameters.volume_name:
                return volume

        return None

    def has_different_config(self):
        """
        Return the list of differences between the current parameters and the existing volume.

        :return: list of options that differ
        """
        differences = DifferenceTracker()
        if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
            differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver'])
        if self.parameters.driver_options:
            if not self.existing_volume.get('Options'):
                differences.add('driver_options',
                                parameter=self.parameters.driver_options,
                                active=self.existing_volume.get('Options'))
            else:
                for key, value in iteritems(self.parameters.driver_options):
                    if (not self.existing_volume['Options'].get(key) or
                            value != self.existing_volume['Options'][key]):
                        differences.add('driver_options.%s' % key,
                                        parameter=value,
                                        active=self.existing_volume['Options'].get(key))
        if self.parameters.labels:
            # Bug fix: the daemon may report 'Labels': null, in which case
            # .get('Labels', {}) returns None and .get(label) below would
            # raise AttributeError. Coerce None to an empty dict.
            existing_labels = self.existing_volume.get('Labels') or {}
            for label in self.parameters.labels:
                if existing_labels.get(label) != self.parameters.labels.get(label):
                    differences.add('labels.%s' % label,
                                    parameter=self.parameters.labels.get(label),
                                    active=existing_labels.get(label))

        return differences

    def create_volume(self):
        """Create the volume unless it already exists; honors check mode."""
        if not self.existing_volume:
            if not self.check_mode:
                try:
                    data = {
                        'Name': self.parameters.volume_name,
                        'Driver': self.parameters.driver,
                        'DriverOpts': self.parameters.driver_options,
                    }
                    if self.parameters.labels is not None:
                        data['Labels'] = self.parameters.labels
                    resp = self.client.post_json_to_json('/volumes/create', data=data)
                    self.existing_volume = self.client.get_json('/volumes/{0}', resp['Name'])
                except APIError as e:
                    self.client.fail(to_native(e))

            self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
            self.results['changed'] = True

    def remove_volume(self):
        """Remove the volume if it exists; honors check mode."""
        if self.existing_volume:
            if not self.check_mode:
                try:
                    self.client.delete_call('/volumes/{0}', self.parameters.volume_name)
                except APIError as e:
                    self.client.fail(to_native(e))

            self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
            self.results['changed'] = True

    def present(self):
        """Ensure the volume exists, recreating it when the recreate policy demands."""
        differences = DifferenceTracker()
        if self.existing_volume:
            differences = self.has_different_config()

        self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None)
        if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always':
            self.remove_volume()
            self.existing_volume = None

        self.create_volume()

        if self.diff or self.check_mode or self.parameters.debug:
            self.diff_result['differences'] = differences.get_legacy_docker_diffs()
            self.diff_tracker.merge(differences)

        # 'actions' is debugging output; only keep it in check/debug mode.
        if not self.check_mode and not self.parameters.debug:
            self.results.pop('actions')

        volume_facts = self.get_existing_volume()
        self.results['volume'] = volume_facts

    def absent(self):
        """Ensure the volume does not exist."""
        self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None)
        self.remove_volume()
+
+
def main():
    """Entry point for the docker_volume module."""
    argument_spec = dict(
        volume_name=dict(type='str', required=True, aliases=['name']),
        state=dict(type='str', default='present', choices=['present', 'absent']),
        driver=dict(type='str', default='local'),
        driver_options=dict(type='dict', default={}),
        labels=dict(type='dict'),
        recreate=dict(type='str', default='never', choices=['always', 'never', 'options-changed']),
        debug=dict(type='bool', default=False)
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
        # "The docker server >= 1.9.0"
    )

    try:
        # The manager performs the whole run in its constructor.
        cm = DockerVolumeManager(client)
        client.module.exit_json(**cm.results)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_volume_info.py b/ansible_collections/community/docker/plugins/modules/docker_volume_info.py
new file mode 100644
index 00000000..100010ba
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_volume_info.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume_info
+short_description: Retrieve facts about Docker volumes
+description:
+ - Performs largely the same function as the C(docker volume inspect) CLI subcommand.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - Name of the volume to inspect.
+ type: str
+ required: true
+ aliases:
+ - volume_name
+
+author:
+ - Felix Fontein (@felixfontein)
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get info about a volume
+ community.docker.docker_volume_info:
+ name: mydata
+ register: result
+
+- name: Does volume exist?
+ ansible.builtin.debug:
+ msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about volume
+ ansible.builtin.debug:
+ var: result.volume
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the volume exists.
+ type: bool
+ returned: always
+ sample: true
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+ - Will be C(none) if volume does not exist.
+ returned: success
+ type: dict
+ sample: '{
+ "CreatedAt": "2018-12-09T17:43:44+01:00",
+ "Driver": "local",
+ "Labels": null,
+ "Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data",
+ "Name": "ansible-test-bd3f6172",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, NotFound
+
+
def get_existing_volume(client, volume_name):
    """Inspect a volume by name.

    Returns the inspect data as a dict, or None when the volume does not
    exist; any other error fails the module.
    """
    try:
        return client.get_json('/volumes/{0}', volume_name)
    except NotFound:
        return None
    except Exception as exc:
        client.fail("Error inspecting volume: %s" % to_native(exc))
+
+
def main():
    """Entry point for the docker_volume_info module."""
    argument_spec = dict(
        name=dict(type='str', required=True, aliases=['volume_name']),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    try:
        volume = get_existing_volume(client, client.module.params['name'])

        client.module.exit_json(
            changed=False,
            # bool(volume) replaces the redundant '(True if volume else False)'.
            exists=bool(volume),
            volume=volume,
        )
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()
diff --git a/ansible_collections/community/docker/plugins/plugin_utils/common.py b/ansible_collections/community/docker/plugins/plugin_utils/common.py
new file mode 100644
index 00000000..f3c7c05c
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/plugin_utils/common.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.utils.display import Display
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClientBase,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DOCKER_COMMON_ARGS,
+)
+
+
+class AnsibleDockerClient(AnsibleDockerClientBase):
+ def __init__(self, plugin, min_docker_version=None, min_docker_api_version=None):
+ self.plugin = plugin
+ self.display = Display()
+ super(AnsibleDockerClient, self).__init__(
+ min_docker_version=min_docker_version,
+ min_docker_api_version=min_docker_api_version)
+
+ def fail(self, msg, **kwargs):
+ if kwargs:
+ msg += '\nContext:\n' + '\n'.join(' {0} = {1!r}'.format(k, v) for (k, v) in kwargs.items())
+ raise AnsibleConnectionFailure(msg)
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ self.display.deprecated(msg, version=version, date=date, collection_name=collection_name)
+
+ def _get_params(self):
+ return dict([
+ (option, self.plugin.get_option(option))
+ for option in DOCKER_COMMON_ARGS
+ ])
diff --git a/ansible_collections/community/docker/plugins/plugin_utils/common_api.py b/ansible_collections/community/docker/plugins/plugin_utils/common_api.py
new file mode 100644
index 00000000..eda28d48
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/plugin_utils/common_api.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.utils.display import Display
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClientBase,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DOCKER_COMMON_ARGS,
+)
+
+
+class AnsibleDockerClient(AnsibleDockerClientBase):
+ def __init__(self, plugin, min_docker_api_version=None):
+ self.plugin = plugin
+ self.display = Display()
+ super(AnsibleDockerClient, self).__init__(
+ min_docker_api_version=min_docker_api_version)
+
+ def fail(self, msg, **kwargs):
+ if kwargs:
+ msg += '\nContext:\n' + '\n'.join(' {0} = {1!r}'.format(k, v) for (k, v) in kwargs.items())
+ raise AnsibleConnectionFailure(msg)
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ self.display.deprecated(msg, version=version, date=date, collection_name=collection_name)
+
+ def _get_params(self):
+ return dict([
+ (option, self.plugin.get_option(option))
+ for option in DOCKER_COMMON_ARGS
+ ])
diff --git a/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py b/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py
new file mode 100644
index 00000000..204996f2
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.compat import selectors
+
+from ansible_collections.community.docker.plugins.module_utils.socket_handler import (
+ DockerSocketHandlerBase,
+)
+
+
+class DockerSocketHandler(DockerSocketHandlerBase):
+ def __init__(self, display, sock, log=None, container=None):
+ super(DockerSocketHandler, self).__init__(sock, selectors, log=lambda msg: display.vvvv(msg, host=container))
diff --git a/ansible_collections/community/docker/tests/config.yml b/ansible_collections/community/docker/tests/config.yml
new file mode 100644
index 00000000..5444c9e1
--- /dev/null
+++ b/ansible_collections/community/docker/tests/config.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# See template for more information:
+# https://github.com/ansible/ansible/blob/devel/test/lib/ansible_test/config/config.yml
+modules:
+ python_requires: '>= 2.7'
diff --git a/ansible_collections/community/docker/tests/ee/all.yml b/ansible_collections/community/docker/tests/ee/all.yml
new file mode 100644
index 00000000..907866f9
--- /dev/null
+++ b/ansible_collections/community/docker/tests/ee/all.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ vars:
+ docker_test_image_alpine: quay.io/ansible/docker-test-containers:alpine3.8
+ tasks:
+ - name: Find all roles
+ find:
+ paths:
+ - "{{ (playbook_dir | default('.')) ~ '/roles' }}"
+ file_type: directory
+ depth: 1
+ register: result
+ - name: Include all roles
+ include_role:
+ name: "{{ item }}"
+ loop: "{{ result.files | map(attribute='path') | map('regex_replace', '.*/', '') | sort }}"
diff --git a/ansible_collections/community/docker/tests/ee/roles/current_container_facts/tasks/main.yml b/ansible_collections/community/docker/tests/ee/roles/current_container_facts/tasks/main.yml
new file mode 100644
index 00000000..d5096cdd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/ee/roles/current_container_facts/tasks/main.yml
@@ -0,0 +1,32 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Retrieve information on current container
+ community.docker.current_container_facts:
+ register: result
+
+# The following two tasks are useful if we ever have to debug why this fails.
+
+- name: Print all Ansible facts
+ debug:
+ var: ansible_facts
+
+- name: Read some files
+ slurp:
+ src: "{{ item }}"
+ loop:
+ - /proc/self/cpuset
+ - /proc/1/cgroup
+ - /proc/1/environ
+
+- name: Print facts returned by module
+ debug:
+ var: result.ansible_facts
+
+- name: Validate results
+ assert:
+ that:
+ - ansible_module_running_in_container
+ - ansible_module_container_type != ''
diff --git a/ansible_collections/community/docker/tests/ee/roles/docker_plain/tasks/main.yml b/ansible_collections/community/docker/tests/ee/roles/docker_plain/tasks/main.yml
new file mode 100644
index 00000000..9c2be8a0
--- /dev/null
+++ b/ansible_collections/community/docker/tests/ee/roles/docker_plain/tasks/main.yml
@@ -0,0 +1,32 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Create random name prefix (for containers, networks, ...)
+- name: Create random container name prefix
+ set_fact:
+ cname_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+
+- name: Make sure image is absent
+ community.docker.docker_image:
+ name: "{{ docker_test_image_alpine }}"
+ state: absent
+
+- name: Make sure image is pulled
+ community.docker.docker_image:
+ name: "{{ docker_test_image_alpine }}"
+ source: pull
+
+- name: Start container
+ community.docker.docker_container:
+ name: "{{ cname_prefix }}-1"
+ image: "{{ docker_test_image_alpine }}"
+ state: started
+
+- name: Remove container
+ community.docker.docker_container:
+ name: "{{ cname_prefix }}-1"
+ state: absent
+ stop_timeout: 1
+ force_kill: true
diff --git a/ansible_collections/community/docker/tests/ee/roles/docker_stack/tasks/main.yml b/ansible_collections/community/docker/tests/ee/roles/docker_stack/tasks/main.yml
new file mode 100644
index 00000000..5d4d5698
--- /dev/null
+++ b/ansible_collections/community/docker/tests/ee/roles/docker_stack/tasks/main.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Currently the docker_stack* modules are not supported in the EE since we'd need to install the Docker CLI client
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection/aliases b/ansible_collections/community/docker/tests/integration/targets/connection/aliases
new file mode 100644
index 00000000..a02a2d61
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+hidden
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection/test.sh b/ansible_collections/community/docker/tests/integration/targets/connection/test.sh
new file mode 100755
index 00000000..793a85dd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection/test.sh
@@ -0,0 +1,16 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+[ -f "${INVENTORY}" ]
+
+# Run connection tests with both the default and C locale.
+
+ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
+
+if ansible --version | grep ansible | grep -E ' 2\.(9|10|11|12|13)\.'; then
+ LC_ALL=C LANG=C ansible-playbook test_connection.yml -i "${INVENTORY}" "$@"
+fi
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection/test_connection.yml b/ansible_collections/community/docker/tests/integration/targets/connection/test_connection.yml
new file mode 100644
index 00000000..bb0a9939
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection/test_connection.yml
@@ -0,0 +1,48 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: "{{ target_hosts }}"
+ gather_facts: false
+ serial: 1
+ tasks:
+
+ ### raw with unicode arg and output
+
+ - name: raw with unicode arg and output
+ raw: echo 汉语
+ register: command
+ - name: check output of raw with unicode arg and output
+ assert:
+ that:
+ - "'汉语' in command.stdout"
+ - command is changed # as of 2.2, raw should default to changed: true for consistency w/ shell/command/script modules
+
+ ### copy local file with unicode filename and content
+
+ - name: create local file with unicode filename and content
+ local_action: lineinfile dest={{ local_tmp }}-汉语/汉语.txt create=true line=汉语
+ - name: remove remote file with unicode filename and content
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语/汉语.txt state=absent"
+ - name: create remote directory with unicode name
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=directory"
+ - name: copy local file with unicode filename and content
+ action: "{{ action_prefix }}copy src={{ local_tmp }}-汉语/汉语.txt dest={{ remote_tmp }}-汉语/汉语.txt"
+
+ ### fetch remote file with unicode filename and content
+
+ - name: remove local file with unicode filename and content
+ local_action: file path={{ local_tmp }}-汉语/汉语.txt state=absent
+ - name: fetch remote file with unicode filename and content
+ fetch: src={{ remote_tmp }}-汉语/汉语.txt dest={{ local_tmp }}-汉语/汉语.txt fail_on_missing=true validate_checksum=true flat=true
+
+ ### remove local and remote temp files
+
+ - name: remove local temp file
+ local_action: file path={{ local_tmp }}-汉语 state=absent
+ - name: remove remote temp file
+ action: "{{ action_prefix }}file path={{ remote_tmp }}-汉语 state=absent"
+
+ ### test wait_for_connection plugin
+ - ansible.builtin.wait_for_connection:
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker/aliases b/ansible_collections/community/docker/tests/integration/targets/connection_docker/aliases
new file mode 100644
index 00000000..40eff16f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+skip/docker # coverage does not work if we're inside a docker container, since we cannot access this container's /tmp dir from the new container
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/connection_docker/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme-connection.sh b/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme-connection.sh
new file mode 100755
index 00000000..f374af7f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme-connection.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+PYTHON="$(command -v python3 python | head -n1)"
+
+group=$(${PYTHON} -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme.sh b/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme.sh
new file mode 100755
index 00000000..0965c5d7
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker/runme.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# If you use another image, you possibly also need to adjust
+# ansible_python_interpreter in test_connection.inventory.
+source ../setup_docker/vars/main.env
+IMAGE="${DOCKER_TEST_IMAGE_PYTHON3}"
+
+# Setup phase
+
+echo "Setup"
+ANSIBLE_ROLES_PATH=.. ansible-playbook setup.yml
+
+# If docker wasn't installed, don't run the tests
+if [ "$(command -v docker)" == "" ]; then
+ exit
+fi
+
+
+# Test phase
+
+CONTAINER_SUFFIX=-${RANDOM}
+
+DOCKER_CONTAINERS="docker-connection-test-container${CONTAINER_SUFFIX}"
+
+[[ -n "${DEBUG:-}" || -n "${ANSIBLE_DEBUG:-}" ]] && set -x
+
+set -euo pipefail
+
+cleanup() {
+ echo "Cleanup"
+ docker rm -f ${DOCKER_CONTAINERS}
+ echo "Shutdown"
+ ANSIBLE_ROLES_PATH=.. ansible-playbook shutdown.yml
+ echo "Done"
+}
+
+trap cleanup INT TERM EXIT
+
+echo "Start containers"
+for CONTAINER in ${DOCKER_CONTAINERS}; do
+ if [ "${ANSIBLE_TEST_COVERAGE:-}" == "" ]; then
+ docker run --rm --name ${CONTAINER} --detach "${IMAGE}" /bin/sh -c 'sleep 10m'
+ else
+ docker run --rm --name ${CONTAINER} --detach -v /tmp:/tmp "${IMAGE}" /bin/sh -c 'sleep 10m'
+ docker exec ${CONTAINER} pip3 install coverage
+ fi
+ echo ${CONTAINER}
+done
+
+cat > test_connection.inventory << EOF
+[docker]
+docker-no-pipelining ansible_pipelining=false
+docker-pipelining ansible_pipelining=true
+
+[docker:vars]
+ansible_host=docker-connection-test-container${CONTAINER_SUFFIX}
+ansible_connection=community.docker.docker
+ansible_python_interpreter=/usr/local/bin/python3
+EOF
+
+echo "Run tests"
+./runme-connection.sh "$@"
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker/setup.yml b/ansible_collections/community/docker/tests/integration/targets/connection_docker/setup.yml
new file mode 100644
index 00000000..e522a51f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker/setup.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ connection: local
+ vars:
+ docker_skip_cleanup: true
+
+ tasks:
+ - name: Setup docker
+ import_role:
+ name: setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker/shutdown.yml b/ansible_collections/community/docker/tests/integration/targets/connection_docker/shutdown.yml
new file mode 100644
index 00000000..122cf059
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker/shutdown.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ connection: local
+ vars:
+ docker_skip_cleanup: true
+
+ tasks:
+ - name: Remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name:
+ - docker
+ - docker-ce
+ - docker-ce-cli
+ state: absent
+ when: not docker_skip_cleanup
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/aliases b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/aliases
new file mode 100644
index 00000000..40eff16f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+skip/docker # coverage does not work if we're inside a docker container, since we cannot access this container's /tmp dir from the new container
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme-connection.sh b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme-connection.sh
new file mode 100755
index 00000000..f374af7f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme-connection.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+PYTHON="$(command -v python3 python | head -n1)"
+
+group=$(${PYTHON} -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme.sh b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme.sh
new file mode 100755
index 00000000..893b019a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/runme.sh
@@ -0,0 +1,65 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# If you use another image, you possibly also need to adjust
+# ansible_python_interpreter in test_connection.inventory.
+source ../setup_docker/vars/main.env
+IMAGE="${DOCKER_TEST_IMAGE_PYTHON3}"
+
+# Setup phase
+
+echo "Setup"
+ANSIBLE_ROLES_PATH=.. ansible-playbook setup.yml
+
+# If docker wasn't installed, don't run the tests
+if [ "$(command -v docker)" == "" ]; then
+ exit
+fi
+
+
+# Test phase
+
+CONTAINER_SUFFIX=-${RANDOM}
+
+DOCKER_CONTAINERS="docker-connection-test-container${CONTAINER_SUFFIX}"
+
+[[ -n "${DEBUG:-}" || -n "${ANSIBLE_DEBUG:-}" ]] && set -x
+
+set -euo pipefail
+
+cleanup() {
+ echo "Cleanup"
+ docker rm -f ${DOCKER_CONTAINERS}
+ echo "Shutdown"
+ ANSIBLE_ROLES_PATH=.. ansible-playbook shutdown.yml
+ echo "Done"
+}
+
+trap cleanup INT TERM EXIT
+
+echo "Start containers"
+for CONTAINER in ${DOCKER_CONTAINERS}; do
+ if [ "${ANSIBLE_TEST_COVERAGE:-}" == "" ]; then
+ docker run --rm --name ${CONTAINER} --detach "${IMAGE}" /bin/sh -c 'sleep 10m'
+ else
+ docker run --rm --name ${CONTAINER} --detach -v /tmp:/tmp "${IMAGE}" /bin/sh -c 'sleep 10m'
+ docker exec ${CONTAINER} pip3 install coverage
+ fi
+ echo ${CONTAINER}
+done
+
+cat > test_connection.inventory << EOF
+[docker_api]
+docker_api-no-pipelining ansible_pipelining=false
+docker_api-pipelining ansible_pipelining=true
+
+[docker_api:vars]
+ansible_host=docker-connection-test-container${CONTAINER_SUFFIX}
+ansible_connection=community.docker.docker_api
+ansible_python_interpreter=/usr/local/bin/python3
+EOF
+
+echo "Run tests"
+./runme-connection.sh "$@"
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/setup.yml b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/setup.yml
new file mode 100644
index 00000000..e522a51f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/setup.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ connection: local
+ vars:
+ docker_skip_cleanup: true
+
+ tasks:
+ - name: Setup docker
+ import_role:
+ name: setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/shutdown.yml b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/shutdown.yml
new file mode 100644
index 00000000..122cf059
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_docker_api/shutdown.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ connection: local
+ vars:
+ docker_skip_cleanup: true
+
+ tasks:
+ - name: Remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name:
+ - docker
+ - docker-ce
+ - docker-ce-cli
+ state: absent
+ when: not docker_skip_cleanup
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/aliases b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/aliases
new file mode 100644
index 00000000..40067d9d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/5
+skip/docker # this requires unfettered access to the container host
+skip/rhel7.9 # nsenter does not work out of the box
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/runme-connection.sh b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/runme-connection.sh
new file mode 100755
index 00000000..f374af7f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/runme-connection.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+PYTHON="$(command -v python3 python | head -n1)"
+
+group=$(${PYTHON} -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/runme.sh b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/runme.sh
new file mode 100755
index 00000000..eebbb6a3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/runme.sh
@@ -0,0 +1,73 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -euo pipefail
+
+[[ -n "${DEBUG:-}" || -n "${ANSIBLE_DEBUG:-}" ]] && set -x
+
+readonly IMAGE="quay.io/ansible/ansible-runner:devel"
+readonly PYTHON="$(command -v python3 python | head -n1)"
+
+# Determine collection root
+COLLECTION_ROOT=./
+while true; do
+    if [ -e "${COLLECTION_ROOT}galaxy.yml" ] || [ -e "${COLLECTION_ROOT}MANIFEST.json" ]; then
+ break
+ fi
+ COLLECTION_ROOT="${COLLECTION_ROOT}../"
+done
+readonly COLLECTION_ROOT="$(cd "${COLLECTION_ROOT}" ; pwd)"
+
+# Setup phase
+echo "Setup"
+ANSIBLE_ROLES_PATH=.. ansible-playbook setup.yml
+
+# If docker wasn't installed, don't run the tests
+if [ "$(command -v docker)" == "" ]; then
+ exit
+fi
+
+cleanup() {
+ echo "Cleanup"
+ echo "Shutdown"
+ ANSIBLE_ROLES_PATH=.. ansible-playbook shutdown.yml
+ echo "Done"
+}
+
+envs=(--env "HOME=${HOME:-}")
+while IFS=$'\0' read -d '' -r line; do
+ key="$(echo "$line" | cut -d= -f1)"
+ value="$(echo "$line" | cut -d= -f2-)"
+ if [[ "${key}" =~ ^(ANSIBLE_|JUNIT_OUTPUT_DIR$|OUTPUT_DIR$|PYTHONPATH$) ]]; then
+ envs+=(--env "${key}=${value}")
+ fi
+done < <(printenv -0)
+
+# Test phase
+cat > test_connection.inventory << EOF
+[nsenter]
+nsenter-no-pipelining ansible_pipelining=false
+nsenter-pipelining ansible_pipelining=true
+
+[nsenter:vars]
+ansible_host=localhost
+ansible_connection=community.docker.nsenter
+ansible_host_volume_mount=/host
+ansible_nsenter_pid=1
+ansible_python_interpreter=${PYTHON}
+EOF
+
+echo "Run tests"
+set -x
+docker run \
+ -i \
+ --rm \
+ --privileged \
+ --pid host \
+ "${envs[@]}" \
+ --volume "${COLLECTION_ROOT}:${COLLECTION_ROOT}" \
+ --workdir "$(pwd)" \
+ "${IMAGE}" \
+ ./runme-connection.sh "$@"
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/setup.yml b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/setup.yml
new file mode 100644
index 00000000..e522a51f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/setup.yml
@@ -0,0 +1,14 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ connection: local
+ vars:
+ docker_skip_cleanup: true
+
+ tasks:
+ - name: Setup docker
+ import_role:
+ name: setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/shutdown.yml b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/shutdown.yml
new file mode 100644
index 00000000..122cf059
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_nsenter/shutdown.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: localhost
+ connection: local
+ vars:
+ docker_skip_cleanup: true
+
+ tasks:
+ - name: Remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name:
+ - docker
+ - docker-ce
+ - docker-ce-cli
+ state: absent
+ when: not docker_skip_cleanup
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_posix/aliases b/ansible_collections/community/docker/tests/integration/targets/connection_posix/aliases
new file mode 100644
index 00000000..44561e2f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_posix/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+needs/target/connection
+hidden
diff --git a/ansible_collections/community/docker/tests/integration/targets/connection_posix/test.sh b/ansible_collections/community/docker/tests/integration/targets/connection_posix/test.sh
new file mode 100755
index 00000000..f374af7f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/connection_posix/test.sh
@@ -0,0 +1,23 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -eux
+
+# Connection tests for POSIX platforms use this script by linking to it from the appropriate 'connection_' target dir.
+# The name of the inventory group to test is extracted from the directory name following the 'connection_' prefix.
+
+PYTHON="$(command -v python3 python | head -n1)"
+
+group=$(${PYTHON} -c \
+ "from os import path; print(path.basename(path.abspath(path.dirname('$0'))).replace('connection_', ''))")
+
+cd ../connection
+
+INVENTORY="../connection_${group}/test_connection.inventory" ./test.sh \
+ -e target_hosts="${group}" \
+ -e action_prefix= \
+ -e local_tmp=/tmp/ansible-local \
+ -e remote_tmp=/tmp/ansible-remote \
+ "$@"
diff --git a/ansible_collections/community/docker/tests/integration/targets/current_container_facts/aliases b/ansible_collections/community/docker/tests/integration/targets/current_container_facts/aliases
new file mode 100644
index 00000000..8577a295
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/current_container_facts/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+skip/rhel
diff --git a/ansible_collections/community/docker/tests/integration/targets/current_container_facts/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/current_container_facts/tasks/main.yml
new file mode 100644
index 00000000..a0d1ae79
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/current_container_facts/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Get facts
+ current_container_facts:
+ register: result
+
+ # WARNING: This is not a proper test as it won't fail when the module does not work!
+ # To make this a proper test, we need to know the environment in which this
+ # test runs, which we do not know in general...
+
+- name: Print facts
+ ansible.builtin.debug:
+ var: result.ansible_facts
+
+- name: Read files
+ ansible.builtin.slurp:
+ src: '{{ item }}'
+ loop:
+ - /proc/self/cgroup
+ - /proc/self/cpuset
+ - /proc/self/mountinfo
+ register: slurp
+ ignore_errors: true
+
+- name: Print files
+ ansible.builtin.debug:
+ msg: |-
+ {{ item.content | ansible.builtin.b64decode | split('
+ ') }}
+ loop: '{{ slurp.results }}'
+ loop_control:
+ label: '{{ item.source | default(item.item) }}'
+ when: item is not failed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_compose/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_compose/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_compose/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_compose/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_compose/meta/main.yml
new file mode 100644
index 00000000..7f44c871
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_compose/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker_compose
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/main.yml
new file mode 100644
index 00000000..d3c7eae5
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/main.yml
@@ -0,0 +1,47 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Create random name prefix (for containers, networks, ...)
+- name: Create random container name prefix
+ set_fact:
+ cname_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+ cnames: []
+ dnetworks: []
+
+- debug:
+ msg: "Using container name prefix {{ cname_prefix }}"
+
+# Run the tests
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure all containers are removed"
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: true
+ with_items: "{{ cnames }}"
+ diff: false
+ - name: "Make sure all networks are removed"
+ docker_network:
+ name: "{{ item }}"
+ state: absent
+ force: true
+ with_items: "{{ dnetworks }}"
+ when: docker_py_version is version('1.10.0', '>=')
+ diff: false
+
+ when: has_docker_compose and docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run all docker_compose tests!"
+ when: has_docker_compose and not(docker_py_version is version('3.5.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/run-test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/run-test.yml
new file mode 100644
index 00000000..65853ddd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/run-test.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/tests/options.yml b/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/tests/options.yml
new file mode 100644
index 00000000..f2440317
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/tests/options.yml
@@ -0,0 +1,243 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ pname: "{{ cname_prefix }}"
+ cname_1: "{{ cname_prefix ~ '1' }}"
+ cname_2: "{{ cname_prefix ~ '2' }}"
+
+####################################################################
+## Profiles ########################################################
+####################################################################
+
+- block:
+ - name: Define service
+ set_fact:
+ test_service: |
+ version: '3'
+ services:
+ {{ cname_1 }}:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ profiles:
+ - profile_1
+ - profile_all
+ stop_grace_period: 1s
+ {{ cname_2 }}:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ profiles:
+ - profile_2
+ - profile_all
+ stop_grace_period: 1s
+ test_cases:
+ - test_name: no services enabled
+ - test_name: enable 1
+ profiles_value:
+ - profile_1
+ - test_name: stop all services
+ profiles_value:
+ - profile_1
+ stopped_value: true
+ - test_name: enable 2
+ profiles_value:
+ - profile_2
+ - test_name: stop all services
+ profiles_value:
+ - profile_2
+ stopped_value: true
+ - test_name: enable both
+ profiles_value:
+ - profile_1
+ - profile_2
+ - test_name: stop all services
+ profiles_value:
+ - profile_1
+ - profile_2
+ stopped_value: true
+ - test_name: enable all
+ profiles_value:
+ - profile_all
+
+ - name: Profiles ({{ test_case.test_name }})
+ docker_compose:
+ project_name: "{{ pname }}"
+ definition: "{{ test_service | from_yaml }}"
+ profiles: "{{ test_case.profiles_value | default(omit) }}"
+ stopped: "{{ test_case.stopped_value | default(omit) }}"
+ state: present
+ register: profiles_outputs
+ loop: "{{ test_cases }}"
+ loop_control:
+ loop_var: test_case
+
+ - name: Cleanup
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: absent
+ definition: "{{ test_service | from_yaml }}"
+
+ - assert:
+ that:
+ - profiles_outputs.results[0] is not changed
+ - profiles_outputs.results[1].services[cname_1][cname_1_name].state.running
+ - profiles_outputs.results[1].services[cname_2] == {}
+ - not profiles_outputs.results[2].services[cname_1][cname_1_name].state.running
+ - profiles_outputs.results[2].services[cname_2] == {}
+ - not profiles_outputs.results[3].services[cname_1][cname_1_name].state.running
+ - profiles_outputs.results[3].services[cname_2][cname_2_name].state.running
+ - not profiles_outputs.results[4].services[cname_1][cname_1_name].state.running
+ - not profiles_outputs.results[4].services[cname_2][cname_2_name].state.running
+ - profiles_outputs.results[5].services[cname_1][cname_1_name].state.running
+ - profiles_outputs.results[5].services[cname_2][cname_2_name].state.running
+ - not profiles_outputs.results[6].services[cname_1][cname_1_name].state.running
+ - not profiles_outputs.results[6].services[cname_2][cname_2_name].state.running
+ - profiles_outputs.results[7].services[cname_1][cname_1_name].state.running
+ - profiles_outputs.results[7].services[cname_2][cname_2_name].state.running
+ vars:
+ cname_1_name: "{{ pname + '_' + cname_1 + '_1' }}"
+ cname_2_name: "{{ pname + '_' + cname_2 + '_1' }}"
+ when: docker_compose_version is version('1.28.0', '>=')
+
+####################################################################
+## Env_file ########################################################
+####################################################################
+
+- block:
+ - name: Define service and files
+ set_fact:
+ compose_file: "{{ remote_tmp_dir }}/docker-compose.yml"
+ env_file: "{{ remote_tmp_dir }}/.env"
+ env_sleep_cmd: sleep 10m
+ new_env_file: "{{ remote_tmp_dir }}/new.env"
+ new_env_sleep_cmd: sleep 20m
+ test_service: |
+ version: '3'
+ services:
+ {{ cname_1 }}:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "${SLEEP_CMD}"'
+ stop_grace_period: 1s
+
+ - name: Define testcases
+ set_fact:
+ test_cases:
+ - test_name: Without env_file option
+ - test_name: With env_file option
+ env_file: "{{ new_env_file }}"
+
+ - name: Generate compose file
+ ansible.builtin.copy:
+ content: "{{ test_service }}"
+ dest: "{{ compose_file }}"
+
+ - name: Generate .env file
+ ansible.builtin.copy:
+ content: |
+ SLEEP_CMD="{{ env_sleep_cmd }}"
+ dest: "{{ env_file }}"
+
+ - name: Generate new.env file
+ ansible.builtin.copy:
+ content: |
+ SLEEP_CMD="{{ new_env_sleep_cmd }}"
+ dest: "{{ new_env_file }}"
+
+ - name: Env_file
+ docker_compose:
+ project_name: "{{ pname }}"
+ project_src: "{{ remote_tmp_dir }}"
+ env_file: "{{ test_case.env_file | default(omit) }}"
+ register: env_file_outputs
+ loop: "{{ test_cases }}"
+ loop_control:
+ loop_var: test_case
+
+ - name: Cleanup
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: absent
+ definition: "{{ test_service | from_yaml }}"
+
+ - assert:
+ that:
+ - "env_sleep_cmd is in (env_file_outputs.results[0].services[cname_1][cname_1_name].cmd | join(' '))"
+ - "new_env_sleep_cmd is in (env_file_outputs.results[1].services[cname_1][cname_1_name].cmd | join(' '))"
+ vars:
+ cname_1_name: "{{ pname + '_' + cname_1 + '_1' }}"
+ cname_2_name: "{{ pname + '_' + cname_2 + '_1' }}"
+
+ - name: Remove files
+ ansible.builtin.file:
+ path: "{{ file_path }}"
+ state: absent
+ loop_control:
+ loop_var: file_path
+ loop:
+ - "{{ compose_file }}"
+ - "{{ env_file }}"
+ - "{{ new_env_file }}"
+ when: docker_compose_version is version('1.25.0', '>=')
+
+####################################################################
+## Project_src #####################################################
+####################################################################
+
+- name: Define service and files
+ set_fact:
+ compose_file: "{{ remote_tmp_dir }}/docker-compose.yml"
+ env_sleep_cmd: sleep 10m
+ new_env_sleep_cmd: sleep 20m
+ test_service: |
+ version: '3'
+ services:
+ {{ cname_1 }}:
+ image: "{{ docker_test_image_alpine }}"
+      command: '/bin/sh -c "sleep 10m"'
+ stop_grace_period: 1s
+
+- name: Generate compose file
+ ansible.builtin.copy:
+ content: "{{ test_service }}"
+ dest: "{{ compose_file }}"
+
+- name: Start with project_src
+ docker_compose:
+ project_src: "{{ remote_tmp_dir }}"
+ register: project_src_1
+
+- name: Start with project_src (idempotent)
+ docker_compose:
+ project_src: "{{ remote_tmp_dir }}"
+ register: project_src_2
+
+- name: Stop with project_src
+ docker_compose:
+ project_src: "{{ remote_tmp_dir }}"
+ state: absent
+ register: project_src_3
+
+- name: Stop with project_src (idempotent)
+ docker_compose:
+ project_src: "{{ remote_tmp_dir }}"
+ state: absent
+ register: project_src_4
+
+- name: Remove files
+ ansible.builtin.file:
+ path: "{{ file_path }}"
+ state: absent
+ loop_control:
+ loop_var: file_path
+ loop:
+ - "{{ compose_file }}"
+
+- assert:
+ that:
+ - project_src_1 is changed
+ # - project_src_2 is not changed -- for some reason, this currently fails!
+ - project_src_3 is changed
+ - project_src_4 is not changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/tests/start-stop.yml b/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/tests/start-stop.yml
new file mode 100644
index 00000000..89945071
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_compose/tasks/tests/start-stop.yml
@@ -0,0 +1,233 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ pname: "{{ cname_prefix }}"
+ cname: "{{ cname_prefix ~ '-hi' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [pname ~ '-' ~ cname] }}"
+ dnetworks: "{{ dnetworks + [pname ~ '_default'] }}"
+
+- name: Define service
+ set_fact:
+ test_service: |
+ version: '3'
+ services:
+ {{ cname }}:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ stop_grace_period: 1s
+ test_service_mod: |
+ version: '3'
+ services:
+ {{ cname }}:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 15m"'
+ stop_grace_period: 1s
+
+####################################################################
+## Present #########################################################
+####################################################################
+
+- name: Present (check)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ check_mode: true
+ register: present_1
+
+- name: Present
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ register: present_2
+
+- name: Present (idempotent)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ register: present_3
+
+- name: Present (idempotent check)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ check_mode: true
+ register: present_4
+
+- name: Present (changed check)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service_mod | from_yaml }}"
+ check_mode: true
+ register: present_5
+
+- name: Present (changed)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service_mod | from_yaml }}"
+ register: present_6
+
+- assert:
+ that:
+ - present_1 is changed
+ - present_2 is changed
+ - present_3 is not changed
+ - present_4 is not changed
+ - present_5 is changed
+ - present_6 is changed
+
+####################################################################
+## Absent ##########################################################
+####################################################################
+
+- name: Absent (check)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: absent
+ definition: "{{ test_service_mod | from_yaml }}"
+ check_mode: true
+ register: absent_1
+
+- name: Absent
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: absent
+ definition: "{{ test_service_mod | from_yaml }}"
+ register: absent_2
+
+- name: Absent (idempotent)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: absent
+ definition: "{{ test_service_mod | from_yaml }}"
+ register: absent_3
+
+- name: Absent (idempotent check)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: absent
+ definition: "{{ test_service_mod | from_yaml }}"
+ check_mode: true
+ register: absent_4
+
+- assert:
+ that:
+ - absent_1 is changed
+ - absent_2 is changed
+ - absent_3 is not changed
+ - absent_4 is not changed
+
+####################################################################
+## Stopping and starting ###########################################
+####################################################################
+
+- name: Present stopped (check)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ stopped: true
+ check_mode: true
+ register: present_1
+
+- name: Present stopped
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ stopped: true
+ register: present_2
+
+- name: Present stopped (idempotent)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ stopped: true
+ register: present_3
+
+- name: Present stopped (idempotent check)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ stopped: true
+ check_mode: true
+ register: present_4
+
+- name: Started (check)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ check_mode: true
+ register: started_1
+
+- name: Started
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ register: started_2
+
+- name: Started (idempotent)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ register: started_3
+
+- name: Started (idempotent check)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ check_mode: true
+ register: started_4
+
+- name: Stopped (check)
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ stopped: true
+ check_mode: true
+ register: stopped_1
+
+- name: Stopped
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: present
+ definition: "{{ test_service | from_yaml }}"
+ stopped: true
+ register: stopped_2
+
+- name: Cleanup
+ docker_compose:
+ project_name: "{{ pname }}"
+ state: absent
+ definition: "{{ test_service | from_yaml }}"
+
+- assert:
+ that:
+ - present_1 is changed
+ - present_2 is changed
+ - present_3 is not changed
+ - present_4 is not changed
+ - started_1 is changed
+ - started_2 is changed
+ - started_3 is not changed
+ - started_4 is not changed
+ - stopped_1 is changed
+ - stopped_2 is changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_config/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_config/aliases
new file mode 100644
index 00000000..fc581d54
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_config/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/3
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_config/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_config/meta/main.yml
new file mode 100644
index 00000000..2650229d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_config/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/main.yml
new file mode 100644
index 00000000..1a713e79
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_docker_config.yml
+ when: docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.30', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_config tests!"
+ when: not(docker_py_version is version('2.6.0', '>=') and docker_api_version is version('1.30', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/test_docker_config.yml b/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/test_docker_config.yml
new file mode 100644
index 00000000..015e8003
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_config/tasks/test_docker_config.yml
@@ -0,0 +1,334 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - shell: "docker info --format '{% raw %}{{json .}}{% endraw %}' | python -m json.tool"
+
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - shell: "docker info --format '{% raw %}{{json .}}{% endraw %}' | python -m json.tool"
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ name: default
+ state: present
+ advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
+
+ - name: Parameter name should be required
+ docker_config:
+ state: present
+ ignore_errors: true
+ register: output
+
+ - name: Assert failure when called with no name
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "missing required arguments: name"'
+
+ - name: Test parameters
+ docker_config:
+ name: foo
+ state: present
+ ignore_errors: true
+ register: output
+
+ - name: Assert failure when called with no data
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "state is present but any of the following are missing: data, data_src"'
+
+ - name: Create config
+ docker_config:
+ name: db_password
+ data: opensesame!
+ state: present
+ register: output
+
+ - name: Create variable config_id
+ set_fact:
+ config_id: "{{ output.config_id }}"
+
+ - name: Inspect config
+ command: "docker config inspect {{ config_id }}"
+ register: inspect
+ ignore_errors: true
+
+ - debug:
+ var: inspect
+
+ - name: Assert config creation succeeded
+ assert:
+ that:
+ - "'db_password' in inspect.stdout"
+ - "'ansible_key' in inspect.stdout"
+ when: inspect is not failed
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in inspect.stderr"
+ when: inspect is failed
+
+ - name: Create config again
+ docker_config:
+ name: db_password
+ data: opensesame!
+ state: present
+ register: output
+
+ - name: Assert create config is idempotent
+ assert:
+ that:
+ - output is not changed
+
+ - name: Write config into file
+ copy:
+ dest: "{{ remote_tmp_dir }}/data"
+ content: |-
+ opensesame!
+
+ - name: Create config again (from file)
+ docker_config:
+ name: db_password
+ data_src: "{{ remote_tmp_dir }}/data"
+ state: present
+ register: output
+
+ - name: Assert create config is idempotent
+ assert:
+ that:
+ - output is not changed
+
+ - name: Create config again (base64)
+ docker_config:
+ name: db_password
+ data: b3BlbnNlc2FtZSE=
+ data_is_b64: true
+ state: present
+ register: output
+
+ - name: Assert create config (base64) is idempotent
+ assert:
+ that:
+ - output is not changed
+
+ - name: Update config
+ docker_config:
+ name: db_password
+ data: newpassword!
+ state: present
+ register: output
+
+ - name: Assert config was updated
+ assert:
+ that:
+ - output is changed
+ - output.config_id != config_id
+
+ - name: Remove config
+ docker_config:
+ name: db_password
+ state: absent
+
+ - name: Check that config is removed
+ command: "docker config inspect {{ config_id }}"
+ register: output
+ ignore_errors: true
+
+ - name: Assert config was removed
+ assert:
+ that:
+ - output is failed
+
+ - name: Remove config
+ docker_config:
+ name: db_password
+ state: absent
+ register: output
+
+ - name: Assert remove config is idempotent
+ assert:
+ that:
+ - output is not changed
+
+# Rolling update
+
+ - name: Create rolling config
+ docker_config:
+ name: rolling_password
+ data: opensesame!
+ rolling_versions: true
+ state: present
+ register: original_output
+
+ - name: Create variable config_id
+ set_fact:
+ config_id: "{{ original_output.config_id }}"
+
+ - name: Inspect config
+ command: "docker config inspect {{ config_id }}"
+ register: inspect
+ ignore_errors: true
+
+ - debug:
+ var: inspect
+
+ - name: Assert config creation succeeded
+ assert:
+ that:
+ - "'rolling_password' in inspect.stdout"
+ - "'ansible_key' in inspect.stdout"
+ - "'ansible_version' in inspect.stdout"
+ - original_output.config_name == 'rolling_password_v1'
+ when: inspect is not failed
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in inspect.stderr"
+ when: inspect is failed
+
+ - name: Create config again
+ docker_config:
+ name: rolling_password
+ data: newpassword!
+ rolling_versions: true
+ state: present
+ register: new_output
+
+ - name: Assert that new version is created
+ assert:
+ that:
+ - new_output is changed
+ - new_output.config_id != original_output.config_id
+ - new_output.config_name != original_output.config_name
+ - new_output.config_name == 'rolling_password_v2'
+
+ - name: Remove rolling configs
+ docker_config:
+ name: rolling_password
+ rolling_versions: true
+ state: absent
+
+ - name: Check that config is removed
+ command: "docker config inspect {{ original_output.config_id }}"
+ register: output
+ ignore_errors: true
+
+ - name: Assert config was removed
+ assert:
+ that:
+ - output is failed
+
+ - name: Check that config is removed
+ command: "docker config inspect {{ new_output.config_id }}"
+ register: output
+ ignore_errors: true
+
+ - name: Assert config was removed
+ assert:
+ that:
+ - output is failed
+
+# template_driver tests
+
+ - when: docker_py_version is version('5.0.3', '>=') and docker_api_version is version('1.37', '>=')
+ block:
+
+ - name: Create regular config
+ docker_config:
+ name: db_password
+ data: opensesame!
+ state: present
+
+ - name: Update config with template_driver
+ docker_config:
+ name: db_password
+ data: opensesame!
+ template_driver: golang
+ state: present
+ register: output
+
+ - name: Assert config was updated
+ assert:
+ that:
+ - output is changed
+
+ - name: Invalid template_driver
+ docker_config:
+ name: db_password
+ data: opensesame!
+ template_driver: "not a template driver"
+ state: present
+ ignore_errors: true
+ register: output
+
+ - name: Assert failure when called with invalid template_driver
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "value of template_driver must be one of: golang, got: not a template driver"'
+
+ - name: Create config again
+ docker_config:
+ name: db_password
+ data: opensesame!
+ template_driver: golang
+ state: present
+ register: output
+
+ - name: Assert create config is idempotent
+ assert:
+ that:
+ - output is not changed
+
+ # data is the docker swarm's name
+ - name: Update config with template data
+ docker_config:
+ name: db_password
+ data: "{{ '{{' }} .Service.Name {{ '}}' }}"
+ template_driver: golang
+ state: present
+ register: output
+
+ - name: Inspect config
+ command: "docker config inspect {{ output.config_id }}"
+ register: inspect
+
+ - name: Show inspection result
+ debug:
+ var: inspect
+
+ - name: Assert config creation succeeded
+ assert:
+ that:
+ - "'db_password' in inspect.stdout"
+ - "'ansible_key' in inspect.stdout"
+ # According to the API docs, 'Data' is "Base64-url-safe-encoded (RFC 4648) config data."
+ - "'\"Data\": \"e3sgLlNlcnZpY2UuTmFtZSB9fQ==\"' in inspect.stdout"
+ - "'Templating' in inspect.stdout"
+ - "'\"Name\": \"golang\"' in inspect.stdout"
+
+ - name: Remove config
+ docker_config:
+ name: db_password
+ state: absent
+
+ - name: Check that config is removed
+ command: "docker config inspect {{ output.config_id }}"
+ register: output
+ ignore_errors: true
+
+ - name: Assert config was removed
+ assert:
+ that:
+ - output is failed
+
+ always:
+ - name: Remove a Swarm cluster
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_container/aliases
new file mode 100644
index 00000000..0837c740
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/5
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/files/env-file b/ansible_collections/community/docker/tests/integration/targets/docker_container/files/env-file
new file mode 100644
index 00000000..87bc9dec
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/files/env-file
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+TEST3=val3
+TEST4=val4
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py b/ansible_collections/community/docker/tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py
new file mode 100644
index 00000000..f6840a3a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/filter_plugins/ipaddr_tools.py
@@ -0,0 +1,21 @@
+# Copyright (c) 2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+def _normalize_ipaddr(ipaddr):
+ # Import when needed, to allow installation of that module in the test setup
+ import ipaddress
+ return ipaddress.ip_address(ipaddr).compressed
+
+
+class FilterModule(object):
+ """ IP address and network manipulation filters """
+
+ def filters(self):
+ return {
+ 'normalize_ipaddr': _normalize_ipaddr,
+ }
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/main.yml
new file mode 100644
index 00000000..9911452f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/main.yml
@@ -0,0 +1,65 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Gather facts on controller
+ setup:
+ gather_subset: '!all'
+ delegate_to: localhost
+ delegate_facts: true
+ run_once: true
+
+- name: Make sure ipaddress is available on controller
+ pip:
+ name: ipaddress
+ delegate_to: localhost
+ when: hostvars['localhost'].ansible_facts.python.version.major < 3
+
+# Create random name prefix (for containers, networks, ...)
+- name: Create random container name prefix
+ set_fact:
+ cname_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+ cnames: []
+ inames: []
+ dnetworks: []
+
+- debug:
+ msg: "Using container name prefix {{ cname_prefix }}"
+
+# Run the tests
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure all containers are removed"
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: true
+ with_items: "{{ cnames }}"
+ diff: false
+ - name: "Make sure all images are removed"
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ inames }}"
+ - name: "Make sure all networks are removed"
+ docker_network:
+ name: "{{ item }}"
+ state: absent
+ force: true
+ with_items: "{{ dnetworks }}"
+ diff: false
+
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run all docker_container tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/run-test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/run-test.yml
new file mode 100644
index 00000000..65853ddd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/run-test.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/comparisons.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/comparisons.yml
new file mode 100644
index 00000000..54f0d4a6
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/comparisons.yml
@@ -0,0 +1,467 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-comparisons' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+####################################################################
+## value ###########################################################
+####################################################################
+
+- name: value
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.com
+ register: value_1
+
+- name: value (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.org
+ force_kill: true
+ comparisons:
+ hostname: ignore
+ register: value_2
+
+- name: value (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.org
+ force_kill: true
+ comparisons:
+ hostname: strict
+ register: value_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - value_1 is changed
+ - value_2 is not changed
+ - value_3 is changed
+
+####################################################################
+## list ############################################################
+####################################################################
+
+- name: list
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 1.1.1.1
+ - 8.8.8.8
+ register: list_1
+
+- name: list (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 9.9.9.9
+ force_kill: true
+ comparisons:
+ dns_servers: ignore
+ register: list_2
+
+- name: list (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 9.9.9.9
+ force_kill: true
+ comparisons:
+ dns_servers: strict
+ register: list_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - list_1 is changed
+ - list_2 is not changed
+ - list_3 is changed
+
+####################################################################
+## set #############################################################
+####################################################################
+
+- name: set
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1010"
+ - "1011"
+ register: set_1
+
+- name: set (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1010"
+ - "1011"
+ - "1012"
+ force_kill: true
+ comparisons:
+ groups: ignore
+ register: set_2
+
+- name: set (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1010"
+ - "1011"
+ - "1012"
+ force_kill: true
+ comparisons:
+ groups: allow_more_present
+ register: set_3
+
+- name: set (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1010"
+ - "1012"
+ force_kill: true
+ comparisons:
+ groups: allow_more_present
+ register: set_4
+
+- name: set (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1010"
+ - "1012"
+ force_kill: true
+ comparisons:
+ groups: strict
+ register: set_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - set_1 is changed
+ - set_2 is not changed
+ - set_3 is changed
+ - set_4 is not changed
+ - set_5 is changed
+
+####################################################################
+## set(dict) #######################################################
+####################################################################
+
+- name: set(dict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/urandom:/dev/virt-urandom:rwm"
+ register: set_dict_1
+
+- name: set(dict) (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/urandom:/dev/virt-urandom:rwm"
+ - "/dev/null:/dev/virt-null:rwm"
+ force_kill: true
+ comparisons:
+ devices: ignore
+ register: set_dict_2
+
+- name: set(dict) (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/urandom:/dev/virt-urandom:rwm"
+ - "/dev/null:/dev/virt-null:rwm"
+ force_kill: true
+ comparisons:
+ devices: allow_more_present
+ register: set_dict_3
+
+- name: set(dict) (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/null:/dev/virt-null:rwm"
+ force_kill: true
+ comparisons:
+ devices: allow_more_present
+ register: set_dict_4
+
+- name: set(dict) (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/null:/dev/virt-null:rwm"
+ force_kill: true
+ comparisons:
+ devices: strict
+ register: set_dict_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - set_dict_1 is changed
+ - set_dict_2 is not changed
+ - set_dict_3 is changed
+ - set_dict_4 is not changed
+ - set_dict_5 is changed
+
+####################################################################
+## dict ############################################################
+####################################################################
+
+- name: dict
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ register: dict_1
+
+- name: dict (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ ansible.test.3: ansible
+ force_kill: true
+ comparisons:
+ labels: ignore
+ register: dict_2
+
+- name: dict (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ ansible.test.3: ansible
+ force_kill: true
+ comparisons:
+ labels: allow_more_present
+ register: dict_3
+
+- name: dict (change, allow_more_present)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ force_kill: true
+ comparisons:
+ labels: allow_more_present
+ register: dict_4
+
+- name: dict (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ force_kill: true
+ comparisons:
+ labels: strict
+ register: dict_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - dict_1 is changed
+ - dict_2 is not changed
+ - dict_3 is changed
+ - dict_4 is not changed
+ - dict_5 is changed
+
+####################################################################
+## wildcard ########################################################
+####################################################################
+
+- name: Pull {{ docker_test_image_hello_world }} image to make sure wildcard_2 test succeeds
+ # If the image isn't there, it will pull it and return 'changed'.
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ source: pull
+
+- name: wildcard
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.com
+ stop_timeout: 1
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ ansible.test.3: ansible
+ register: wildcard_1
+
+- name: wildcard (change, ignore)
+ docker_container:
+ image: "{{ docker_test_image_hello_world }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.org
+ stop_timeout: 2
+ labels:
+ ansible.test.1: hello
+ ansible.test.4: ignore
+ force_kill: true
+ comparisons:
+ '*': ignore
+ register: wildcard_2
+
+- name: wildcard (change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.org
+ stop_timeout: 1
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ ansible.test.3: ansible
+ force_kill: true
+ comparisons:
+ '*': strict
+ register: wildcard_3
+
+- name: wildcard (no change, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ hostname: example.org
+ stop_timeout: 1
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ ansible.test.3: ansible
+ force_kill: true
+ comparisons:
+ '*': strict
+ register: wildcard_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - wildcard_1 is changed
+ - wildcard_2 is not changed
+ - wildcard_3 is changed
+ - wildcard_4 is not changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/compatibility.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/compatibility.yml
new file mode 100644
index 00000000..265aacac
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/compatibility.yml
@@ -0,0 +1,122 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-hi' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+####################################################################
+## container_default_behavior: compatibility #######################
+####################################################################
+
+- name: Start container (check)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: compatibility
+ check_mode: true
+ register: start_1
+
+- name: Start container
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: compatibility
+ register: start_2
+
+- name: Start container (idempotent)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: compatibility
+ register: start_3
+
+- name: Start container (idempotent check)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ state: started
+ container_default_behavior: compatibility
+ check_mode: true
+ register: start_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - start_1 is changed
+ - start_2 is changed
+ - start_3 is not changed
+ - start_4 is not changed
+
+####################################################################
+## container_default_behavior: no_defaults #########################
+####################################################################
+
+- name: Start container (check)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: no_defaults
+ check_mode: true
+ register: start_1
+
+- name: Start container
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: no_defaults
+ register: start_2
+
+- name: Start container (idempotent)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: no_defaults
+ register: start_3
+
+- name: Start container (idempotent check)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ container_default_behavior: no_defaults
+ check_mode: true
+ register: start_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - start_1 is changed
+ - start_2 is changed
+ - start_3 is not changed
+ - start_4 is not changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/image-ids.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/image-ids.yml
new file mode 100644
index 00000000..76270c68
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/image-ids.yml
@@ -0,0 +1,155 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-iid' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+- name: Pull images
+ docker_image:
+ name: "{{ image }}"
+ source: pull
+ loop:
+ - "{{ docker_test_image_hello_world }}"
+ - "{{ docker_test_image_alpine }}"
+ loop_control:
+ loop_var: image
+
+- name: Get image ID of {{ docker_test_image_hello_world }} and {{ docker_test_image_alpine }} images
+ docker_image_info:
+ name:
+ - "{{ docker_test_image_hello_world }}"
+ - "{{ docker_test_image_alpine }}"
+ register: image_info
+
+- assert:
+ that:
+ - image_info.images | length == 2
+
+- name: Print image IDs
+ debug:
+ msg: "{{ docker_test_image_hello_world }}: {{ image_info.images[0].Id }}; {{ docker_test_image_alpine }}: {{ image_info.images[1].Id }}"
+
+- name: Create container with {{ docker_test_image_hello_world }} image via ID
+ docker_container:
+ image: "{{ image_info.images[0].Id }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: true
+ register: create_1
+
+- name: Create container with {{ docker_test_image_hello_world }} image via ID (idempotent)
+ docker_container:
+ image: "{{ image_info.images[0].Id }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: true
+ register: create_2
+
+- name: Create container with {{ docker_test_image_alpine }} image via ID
+ docker_container:
+ image: "{{ image_info.images[1].Id }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: true
+ register: create_3
+
+- name: Create container with {{ docker_test_image_alpine }} image via ID (idempotent)
+ docker_container:
+ image: "{{ image_info.images[1].Id }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: true
+ register: create_4
+
+- name: Untag image
+ # Image will not be deleted since the container still uses it
+ docker_image:
+ name: "{{ docker_test_image_alpine }}"
+ force_absent: true
+ state: absent
+
+- name: Create container with {{ docker_test_image_alpine }} image via name (check mode, will pull, same image)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: present
+ register: create_5
+ check_mode: true
+
+- name: Create container with {{ docker_test_image_alpine }} image via name (will pull, same image)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: present
+ register: create_6
+
+- name: Cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - create_1 is changed
+ - create_2 is not changed
+ - create_3 is changed
+ - create_4 is not changed
+ - create_5 is changed
+ - create_6 is changed
+ - create_6.container.Image == image_info.images[1].Id
+ - create_6.container.Id == create_4.container.Id # make sure container wasn't recreated
+
+- name: Create container with {{ docker_test_image_digest_base }} image via old digest
+ docker_container:
+ image: "{{ docker_test_image_digest_base }}@sha256:{{ docker_test_image_digest_v1 }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: true
+ register: digest_1
+
+- name: Create container with {{ docker_test_image_digest_base }} image via old digest (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_digest_base }}@sha256:{{ docker_test_image_digest_v1 }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: true
+ register: digest_2
+
+- name: Create container with {{ docker_test_image_digest_base }} image via old digest (idempotent, pull)
+ docker_container:
+ image: "{{ docker_test_image_digest_base }}@sha256:{{ docker_test_image_digest_v1 }}"
+ name: "{{ cname }}"
+ pull: true
+ state: present
+ force_kill: true
+ register: digest_3
+
+- name: Update container with {{ docker_test_image_digest_base }} image via new digest
+ docker_container:
+ image: "{{ docker_test_image_digest_base }}@sha256:{{ docker_test_image_digest_v2 }}"
+ name: "{{ cname }}"
+ state: present
+ force_kill: true
+ register: digest_4
+
+- name: Cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - digest_1 is changed
+ - digest_2 is not changed
+ - digest_3 is not changed
+ - digest_4 is changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml
new file mode 100644
index 00000000..3ce6691a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/mounts-volumes.yml
@@ -0,0 +1,558 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-mounts' }}"
+ cname_h1: "{{ cname_prefix ~ '-mounts-h1' }}"
+ cname_h2: "{{ cname_prefix ~ '-mounts-h2' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname, cname_h1, cname_h2] }}"
+
+####################################################################
+## keep_volumes ####################################################
+####################################################################
+
+# TODO: add integration tests covering the keep_volumes option
+
+####################################################################
+## mounts ##########################################################
+####################################################################
+
+- name: mounts
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ - source: /
+ target: /whatever
+ type: bind
+ read_only: false
+ register: mounts_1
+
+- name: mounts (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /
+ target: /whatever
+ type: bind
+ read_only: false
+ - source: /tmp
+ target: /tmp
+ type: bind
+ register: mounts_2
+
+- name: mounts (less mounts)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ register: mounts_3
+
+- name: mounts (more mounts)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ - source: /tmp
+ target: /somewhereelse
+ type: bind
+ read_only: true
+ force_kill: true
+ register: mounts_4
+
+- name: mounts (different modes)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ - source: /tmp
+ target: /somewhereelse
+ type: bind
+ read_only: false
+ force_kill: true
+ register: mounts_5
+
+- name: mounts (endpoint collision)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /home
+ target: /x
+ type: bind
+ - source: /etc
+ target: /x
+ type: bind
+ read_only: false
+ force_kill: true
+ register: mounts_6
+ ignore_errors: true
+
+- name: mounts (anonymous volume)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - target: /tmp
+ type: volume
+ force_kill: true
+ register: mounts_7
+
+- name: mounts (anonymous volume idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - target: /tmp
+ type: volume
+ force_kill: true
+ register: mounts_8
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - mounts_1 is changed
+ - mounts_2 is not changed
+ - mounts_3 is not changed
+ - mounts_4 is changed
+ - mounts_5 is changed
+ - mounts_6 is failed
+ - "'The mount point \"/x\" appears twice in the mounts option' == mounts_6.msg"
+ - mounts_7 is changed
+ - mounts_8 is not changed
+
+####################################################################
+## tmpfs ###########################################################
+####################################################################
+
+- name: tmpfs
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - target: /cache1
+ type: tmpfs
+ tmpfs_mode: "1777"
+ tmpfs_size: "1GB"
+ - target: /cache2
+ type: tmpfs
+ tmpfs_mode: "1777"
+ tmpfs_size: "1GB"
+ force_kill: true
+ register: tmpfs_1
+
+- name: tmpfs (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - target: /cache2
+ type: tmpfs
+ tmpfs_mode: "1777"
+ tmpfs_size: "1GB"
+ - target: /cache1
+ type: tmpfs
+ tmpfs_mode: "1777"
+ tmpfs_size: "1GB"
+ force_kill: true
+ register: tmpfs_2
+
+- name: tmpfs (more mounts)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - target: /cache1
+ type: tmpfs
+ tmpfs_mode: "1777"
+ tmpfs_size: "1GB"
+ - target: /cache2
+ type: tmpfs
+ tmpfs_mode: "1777"
+ tmpfs_size: "1GB"
+ - target: /cache3
+ type: tmpfs
+ tmpfs_mode: "1777"
+ tmpfs_size: "1GB"
+ force_kill: true
+ register: tmpfs_3
+
+- name: tmpfs (change mode)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - target: /cache1
+ type: tmpfs
+ tmpfs_mode: "1700"
+ tmpfs_size: "1GB"
+ force_kill: true
+ register: tmpfs_4
+
+- name: tmpfs (change size)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - target: /cache1
+ type: tmpfs
+ tmpfs_mode: "1700"
+ tmpfs_size: "2GB"
+ force_kill: true
+ register: tmpfs_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - tmpfs_1 is changed
+ - tmpfs_2 is not changed
+ - tmpfs_3 is changed
+ - tmpfs_4 is changed
+ - tmpfs_5 is changed
+
+####################################################################
+## mounts + volumes ################################################
+####################################################################
+
+- name: mounts + volumes
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /
+ target: /whatever
+ type: bind
+ read_only: true
+ volumes:
+ - /tmp:/tmp
+ register: mounts_volumes_1
+
+- name: mounts + volumes (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /
+ target: /whatever
+ type: bind
+ read_only: true
+ volumes:
+ - /tmp:/tmp
+ register: mounts_volumes_2
+
+- name: mounts + volumes (switching)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ read_only: false
+ volumes:
+ - /:/whatever:ro
+ force_kill: true
+ register: mounts_volumes_3
+
+- name: mounts + volumes (collision, should fail)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ mounts:
+ - source: /tmp
+ target: /tmp
+ type: bind
+ read_only: false
+ volumes:
+ - /tmp:/tmp
+ force_kill: true
+ register: mounts_volumes_4
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - mounts_volumes_1 is changed
+ - mounts_volumes_2 is not changed
+ - mounts_volumes_3 is changed
+ - mounts_volumes_4 is failed
+ - "'The mount point \"/tmp\" appears both in the volumes and mounts option' in mounts_volumes_4.msg"
+
+####################################################################
+## volume_driver ###################################################
+####################################################################
+
+- name: volume_driver
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ volume_driver: local
+ state: started
+ register: volume_driver_1
+
+- name: volume_driver (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ volume_driver: local
+ state: started
+ register: volume_driver_2
+
+- name: volume_driver (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ volume_driver: /
+ state: started
+ force_kill: true
+ register: volume_driver_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - volume_driver_1 is changed
+ - volume_driver_2 is not changed
+ - volume_driver_3 is changed
+
+####################################################################
+## volumes #########################################################
+####################################################################
+
+- name: volumes
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/tmp:/tmp"
+ - "/:/whatever:rw,z"
+ - "/anon:rw"
+ register: volumes_1
+
+- name: volumes (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/:/whatever:rw,z"
+ - "/tmp:/tmp"
+ - "/anon:rw"
+ register: volumes_2
+
+- name: volumes (less volumes)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/tmp:/tmp"
+ register: volumes_3
+
+- name: volumes (more volumes)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/tmp:/tmp"
+ - "/tmp:/somewhereelse:ro,Z"
+ force_kill: true
+ register: volumes_4
+
+- name: volumes (different modes)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/tmp:/tmp"
+ - "/tmp:/somewhereelse:ro"
+ force_kill: true
+ register: volumes_5
+
+- name: volumes (collision)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes:
+ - "/etc:/tmp"
+ - "/home:/tmp:ro"
+ force_kill: true
+ register: volumes_6
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - volumes_1 is changed
+ - volumes_1.container.Config.Volumes | length == 1
+ - volumes_1.container.Config.Volumes['/anon:rw'] | length == 0
+ - volumes_2 is not changed
+ - volumes_3 is not changed
+ - volumes_4 is changed
+ - not volumes_4.container.Config.Volumes
+ - volumes_5 is changed
+ - volumes_6 is failed
+ - "'The mount point \"/tmp\" appears twice in the volumes option' in volumes_6.msg"
+
+####################################################################
+## volumes_from ####################################################
+####################################################################
+
+- name: start helpers
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ container_name }}"
+ state: started
+ volumes:
+ - "{{ '/tmp:/tmp' if container_name == cname_h1 else '/:/whatever:ro' }}"
+ loop:
+ - "{{ cname_h1 }}"
+ - "{{ cname_h2 }}"
+ loop_control:
+ loop_var: container_name
+
+- name: volumes_from
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes_from: "{{ cname_h1 }}"
+ register: volumes_from_1
+
+- name: volumes_from (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes_from: "{{ cname_h1 }}"
+ register: volumes_from_2
+
+- name: volumes_from (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ volumes_from: "{{ cname_h2 }}"
+ force_kill: true
+ register: volumes_from_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: true
+ loop:
+ - "{{ cname }}"
+ - "{{ cname_h1 }}"
+ - "{{ cname_h2 }}"
+ loop_control:
+ loop_var: container_name
+ diff: false
+
+- assert:
+ that:
+ - volumes_from_1 is changed
+ - volumes_from_2 is not changed
+ - volumes_from_3 is changed
+
+####################################################################
+####################################################################
+####################################################################
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/network.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/network.yml
new file mode 100644
index 00000000..d6a6f0bc
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/network.yml
@@ -0,0 +1,747 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-network' }}"
+ cname_h1: "{{ cname_prefix ~ '-network-h1' }}"
+ nname_1: "{{ cname_prefix ~ '-network-1' }}"
+ nname_2: "{{ cname_prefix ~ '-network-2' }}"
+ nname_3: "{{ cname_prefix ~ '-network-3' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname, cname_h1] }}"
+ dnetworks: "{{ dnetworks + [nname_1, nname_2, nname_3] }}"
+
+- name: Create networks
+ docker_network:
+ name: "{{ network_name }}"
+ state: present
+ loop:
+ - "{{ nname_1 }}"
+ - "{{ nname_2 }}"
+ loop_control:
+ loop_var: network_name
+
+- set_fact:
+ subnet_ipv4_base: 10.{{ 16 + (240 | random) }}.{{ 16 + (240 | random) }}
+ subnet_ipv6_base: fdb6:feea:{{ '%0.4x:%0.4x' | format(65536 | random, 65536 | random) }}
+ # If netaddr would be installed on the controller, one could do:
+ # subnet_ipv4: "10.{{ 16 + (240 | random) }}.{{ 16 + (240 | random) }}.0/24"
+ # subnet_ipv6: "fdb6:feea:{{ '%0.4x:%0.4x' | format(65536 | random, 65536 | random) }}::/64"
+
+- set_fact:
+ subnet_ipv4: "{{ subnet_ipv4_base }}.0/24"
+ subnet_ipv6: "{{ subnet_ipv6_base }}::/64"
+ nname_3_ipv4_2: "{{ subnet_ipv4_base }}.2"
+ nname_3_ipv4_3: "{{ subnet_ipv4_base }}.3"
+ nname_3_ipv4_4: "{{ subnet_ipv4_base }}.4"
+ nname_3_ipv6_2: "{{ subnet_ipv6_base }}::2"
+ nname_3_ipv6_3: "{{ subnet_ipv6_base }}::3"
+ nname_3_ipv6_4: "{{ subnet_ipv6_base }}::4"
+ # If netaddr would be installed on the controller, one could do:
+ # nname_3_ipv4_2: "{{ subnet_ipv4 | ansible.netcommon.next_nth_usable(2) }}"
+ # nname_3_ipv4_3: "{{ subnet_ipv4 | ansible.netcommon.next_nth_usable(3) }}"
+ # nname_3_ipv4_4: "{{ subnet_ipv4 | ansible.netcommon.next_nth_usable(4) }}"
+ # nname_3_ipv6_2: "{{ subnet_ipv6 | ansible.netcommon.next_nth_usable(2) }}"
+ # nname_3_ipv6_3: "{{ subnet_ipv6 | ansible.netcommon.next_nth_usable(3) }}"
+ # nname_3_ipv6_4: "{{ subnet_ipv6 | ansible.netcommon.next_nth_usable(4) }}"
+
+- debug:
+ msg: "Chose random IPv4 subnet {{ subnet_ipv4 }} and random IPv6 subnet {{ subnet_ipv6 }}"
+
+- name: Create network with fixed IPv4 and IPv6 subnets
+ docker_network:
+ name: "{{ nname_3 }}"
+ enable_ipv6: true
+ ipam_config:
+ - subnet: "{{ subnet_ipv4 }}"
+ - subnet: "{{ subnet_ipv6 }}"
+ state: present
+
+####################################################################
+## network_mode ####################################################
+####################################################################
+
+- name: network_mode
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ network_mode: host
+ register: network_mode_1
+
+- name: network_mode (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ network_mode: host
+ register: network_mode_2
+
+- name: network_mode (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ network_mode: none
+ force_kill: true
+ register: network_mode_3
+
+- name: network_mode (container mode setup)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname_h1 }}"
+ state: started
+ register: cname_h1_id
+
+- name: network_mode (container mode)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ network_mode: "container:{{ cname_h1_id.container.Id }}"
+ force_kill: true
+ register: network_mode_4
+
+- name: network_mode (container mode idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ network_mode: "container:{{ cname_h1 }}"
+ register: network_mode_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: true
+ loop:
+ - "{{ cname }}"
+ - "{{ cname_h1 }}"
+ loop_control:
+ loop_var: container_name
+ diff: false
+
+- assert:
+ that:
+ - network_mode_1 is changed
+ - network_mode_1.container.HostConfig.NetworkMode == 'host'
+ - network_mode_2 is not changed
+ - network_mode_2.container.HostConfig.NetworkMode == 'host'
+ - network_mode_3 is changed
+ - network_mode_3.container.HostConfig.NetworkMode == 'none'
+ - network_mode_4 is changed
+ - network_mode_4.container.HostConfig.NetworkMode == 'container:' ~ cname_h1_id.container.Id
+ - network_mode_5 is not changed
+ - network_mode_5.container.HostConfig.NetworkMode == 'container:' ~ cname_h1_id.container.Id
+
+####################################################################
+## networks, purge_networks for networks_cli_compatible=no #########
+####################################################################
+
+- name: networks_cli_compatible=no, networks w/o purge_networks
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_1 }}"
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: false
+ register: networks_1
+
+- name: networks_cli_compatible=no, networks w/o purge_networks
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_1 }}"
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: false
+ register: networks_2
+
+- name: networks_cli_compatible=no, networks, purge_networks
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ comparisons:
+ networks: strict
+ networks:
+ - name: bridge
+ - name: "{{ nname_1 }}"
+ networks_cli_compatible: false
+ force_kill: true
+ register: networks_3
+
+- name: networks_cli_compatible=no, networks, purge_networks (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ comparisons:
+ networks: strict
+ networks:
+ - name: "{{ nname_1 }}"
+ - name: bridge
+ networks_cli_compatible: false
+ register: networks_4
+
+- name: networks_cli_compatible=no, networks (less networks)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: bridge
+ networks_cli_compatible: false
+ register: networks_5
+
+- name: networks_cli_compatible=no, networks, purge_networks (less networks)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ comparisons:
+ networks: strict
+ networks:
+ - name: bridge
+ networks_cli_compatible: false
+ force_kill: true
+ register: networks_6
+
+- name: networks_cli_compatible=no, networks, purge_networks (more networks)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ comparisons:
+ networks: strict
+ networks:
+ - name: bridge
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: false
+ force_kill: true
+ register: networks_7
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ # networks_1 has networks default, 'bridge', nname_1
+ - networks_1 is changed
+ - networks_1.container.NetworkSettings.Networks | length == 3
+ - nname_1 in networks_1.container.NetworkSettings.Networks
+ - nname_2 in networks_1.container.NetworkSettings.Networks
+ - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks"
+ # networks_2 has networks default, 'bridge', nname_1
+ - networks_2 is not changed
+ - networks_2.container.NetworkSettings.Networks | length == 3
+ - nname_1 in networks_2.container.NetworkSettings.Networks
+ - nname_2 in networks_1.container.NetworkSettings.Networks
+ - "'default' in networks_1.container.NetworkSettings.Networks or 'bridge' in networks_1.container.NetworkSettings.Networks"
+ # networks_3 has networks 'bridge', nname_1
+ - networks_3 is changed
+ - networks_3.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_3.container.NetworkSettings.Networks
+ - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks"
+ # networks_4 has networks 'bridge', nname_1
+ - networks_4 is not changed
+ - networks_4.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_4.container.NetworkSettings.Networks
+ - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks"
+ # networks_5 has networks 'bridge', nname_1
+ - networks_5 is not changed
+ - networks_5.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_5.container.NetworkSettings.Networks
+ - "'default' in networks_5.container.NetworkSettings.Networks or 'bridge' in networks_5.container.NetworkSettings.Networks"
+ # networks_6 has networks 'bridge'
+ - networks_6 is changed
+ - networks_6.container.NetworkSettings.Networks | length == 1
+ - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks"
+ # networks_7 has networks 'bridge', nname_2
+ - networks_7 is changed
+ - networks_7.container.NetworkSettings.Networks | length == 2
+ - nname_2 in networks_7.container.NetworkSettings.Networks
+ - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks"
+
+####################################################################
+## networks for networks_cli_compatible=yes ########################
+####################################################################
+
+- name: networks_cli_compatible=yes, networks specified
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_1 }}"
+ aliases:
+ - alias1
+ - alias2
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: true
+ register: networks_1
+
+- name: networks_cli_compatible=yes, networks specified
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_1 }}"
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: true
+ register: networks_2
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- name: networks_cli_compatible=yes, empty networks list specified
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: true
+ register: networks_3
+
+- name: networks_cli_compatible=yes, empty networks list specified
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: true
+ register: networks_4
+
+- name: networks_cli_compatible=yes, empty networks list specified, purge_networks
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: true
+ comparisons:
+ networks: strict
+ force_kill: true
+ register: networks_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- name: networks_cli_compatible=yes, networks not specified
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks_cli_compatible: true
+ force_kill: true
+ register: networks_6
+
+- name: networks_cli_compatible=yes, networks not specified
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks_cli_compatible: true
+ register: networks_7
+
+- name: networks_cli_compatible=yes, networks not specified, purge_networks
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks_cli_compatible: true
+ purge_networks: true
+ # To replace `purge_networks=true`, we have to specify `networks: []`:
+ # comparisons:
+ # networks: strict
+ # networks: []
+ force_kill: true
+ register: networks_8
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- debug: var=networks_3
+
+- assert:
+ that:
+ # networks_1 has networks nname_1, nname_2
+ - networks_1 is changed
+ - networks_1.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_1.container.NetworkSettings.Networks
+ - nname_2 in networks_1.container.NetworkSettings.Networks
+ # networks_2 has networks nname_1, nname_2
+ - networks_2 is not changed
+ - networks_2.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_2.container.NetworkSettings.Networks
+ - nname_2 in networks_1.container.NetworkSettings.Networks
+ # networks_3 has networks 'bridge'
+ - networks_3 is changed
+ - networks_3.container.NetworkSettings.Networks | length == 1
+ - "'default' in networks_3.container.NetworkSettings.Networks or 'bridge' in networks_3.container.NetworkSettings.Networks"
+ # networks_4 has networks 'bridge'
+ - networks_4 is not changed
+ - networks_4.container.NetworkSettings.Networks | length == 1
+ - "'default' in networks_4.container.NetworkSettings.Networks or 'bridge' in networks_4.container.NetworkSettings.Networks"
+ # networks_5 has no networks
+ - networks_5 is changed
+ - networks_5.container.NetworkSettings.Networks | length == 0
+ # networks_6 has networks 'bridge'
+ - networks_6 is changed
+ - networks_6.container.NetworkSettings.Networks | length == 1
+ - "'default' in networks_6.container.NetworkSettings.Networks or 'bridge' in networks_6.container.NetworkSettings.Networks"
+ # networks_7 has networks 'bridge'
+ - networks_7 is not changed
+ - networks_7.container.NetworkSettings.Networks | length == 1
+ - "'default' in networks_7.container.NetworkSettings.Networks or 'bridge' in networks_7.container.NetworkSettings.Networks"
+ # networks_8 has no networks
+ - networks_8 is changed
+ - networks_8.container.NetworkSettings.Networks | length == 0
+
+####################################################################
+## networks with comparisons #######################################
+####################################################################
+
+- name: create container with one network
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_1 }}"
+ networks_cli_compatible: true
+ register: networks_1
+
+- name: different networks, comparisons=ignore
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: true
+ comparisons:
+ network_mode: ignore # otherwise we'd have to set network_mode to nname_1
+ networks: ignore
+ register: networks_2
+
+- name: less networks, comparisons=ignore
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: true
+ comparisons:
+ networks: ignore
+ register: networks_3
+
+- name: less networks, comparisons=allow_more_present
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: true
+ comparisons:
+ networks: allow_more_present
+ register: networks_4
+
+- name: different networks, comparisons=allow_more_present
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: true
+ comparisons:
+ network_mode: ignore # otherwise we'd have to set network_mode to nname_1
+ networks: allow_more_present
+ force_kill: true
+ register: networks_5
+
+- name: different networks, comparisons=strict
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_2 }}"
+ networks_cli_compatible: true
+ comparisons:
+ networks: strict
+ force_kill: true
+ register: networks_6
+
+- name: less networks, comparisons=strict
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks: []
+ networks_cli_compatible: true
+ comparisons:
+ networks: strict
+ force_kill: true
+ register: networks_7
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ # networks_1 has networks nname_1
+ - networks_1 is changed
+ - networks_1.container.NetworkSettings.Networks | length == 1
+ - nname_1 in networks_1.container.NetworkSettings.Networks
+ # networks_2 has networks nname_1
+ - networks_2 is not changed
+ - networks_2.container.NetworkSettings.Networks | length == 1
+ - nname_1 in networks_2.container.NetworkSettings.Networks
+ # networks_3 has networks nname_1
+ - networks_3 is not changed
+ - networks_3.container.NetworkSettings.Networks | length == 1
+ - nname_1 in networks_3.container.NetworkSettings.Networks
+ # networks_4 has networks nname_1
+ - networks_4 is not changed
+ - networks_4.container.NetworkSettings.Networks | length == 1
+ - nname_1 in networks_4.container.NetworkSettings.Networks
+ # networks_5 has networks nname_1, nname_2
+ - networks_5 is changed
+ - networks_5.container.NetworkSettings.Networks | length == 2
+ - nname_1 in networks_5.container.NetworkSettings.Networks
+ - nname_2 in networks_5.container.NetworkSettings.Networks
+ # networks_6 has networks nname_2
+ - networks_6 is changed
+ - networks_6.container.NetworkSettings.Networks | length == 1
+ - nname_2 in networks_6.container.NetworkSettings.Networks
+ # networks_7 has no networks
+ - networks_7 is changed
+ - networks_7.container.NetworkSettings.Networks | length == 0
+
+####################################################################
+## networks with IP address ########################################
+####################################################################
+
+- name: create container (stopped) with one network and fixed IP
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: stopped
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_2 }}"
+ ipv6_address: "{{ nname_3_ipv6_2 }}"
+ networks_cli_compatible: true
+ register: networks_1
+
+- name: create container (stopped) with one network and fixed IP (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: stopped
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_2 }}"
+ ipv6_address: "{{ nname_3_ipv6_2 }}"
+ networks_cli_compatible: true
+ register: networks_2
+
+- name: create container (stopped) with one network and fixed IP (different IPv4)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: stopped
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_3 }}"
+ ipv6_address: "{{ nname_3_ipv6_2 }}"
+ networks_cli_compatible: true
+ register: networks_3
+
+- name: create container (stopped) with one network and fixed IP (different IPv6)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: stopped
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_3 }}"
+ ipv6_address: "{{ nname_3_ipv6_3 }}"
+ networks_cli_compatible: true
+ register: networks_4
+
+- name: create container (started) with one network and fixed IP
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ register: networks_5
+
+- name: create container (started) with one network and fixed IP (different IPv4)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_4 }}"
+ ipv6_address: "{{ nname_3_ipv6_3 }}"
+ networks_cli_compatible: true
+ force_kill: true
+ register: networks_6
+
+- name: create container (started) with one network and fixed IP (different IPv6)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_4 }}"
+ ipv6_address: "{{ nname_3_ipv6_4 }}"
+ networks_cli_compatible: true
+ force_kill: true
+ register: networks_7
+
+- name: create container (started) with one network and fixed IP (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ networks:
+ - name: "{{ nname_3 }}"
+ ipv4_address: "{{ nname_3_ipv4_4 }}"
+ ipv6_address: "{{ nname_3_ipv6_4 }}"
+ networks_cli_compatible: true
+ register: networks_8
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - networks_1 is changed
+ - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2
+ - networks_1.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr
+ - networks_1.container.NetworkSettings.Networks[nname_3].IPAddress == ""
+ - networks_1.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
+ - networks_2 is not changed
+ - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_2
+ - networks_2.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr
+ - networks_2.container.NetworkSettings.Networks[nname_3].IPAddress == ""
+ - networks_2.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
+ - networks_3 is changed
+ - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3
+ - networks_3.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_2 | normalize_ipaddr
+ - networks_3.container.NetworkSettings.Networks[nname_3].IPAddress == ""
+ - networks_3.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
+ - networks_4 is changed
+ - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3
+ - networks_4.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr
+ - networks_4.container.NetworkSettings.Networks[nname_3].IPAddress == ""
+ - networks_4.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address == ""
+ - networks_5 is changed
+ - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_3
+ - networks_5.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr
+ - networks_5.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_3
+ - networks_5.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr
+ - networks_6 is changed
+ - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4
+ - networks_6.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr
+ - networks_6.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4
+ - networks_6.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_3 | normalize_ipaddr
+ - networks_7 is changed
+ - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4
+ - networks_7.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr
+ - networks_7.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4
+ - networks_7.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr
+ - networks_8 is not changed
+ - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv4Address == nname_3_ipv4_4
+ - networks_8.container.NetworkSettings.Networks[nname_3].IPAMConfig.IPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr
+ - networks_8.container.NetworkSettings.Networks[nname_3].IPAddress == nname_3_ipv4_4
+ - networks_8.container.NetworkSettings.Networks[nname_3].GlobalIPv6Address | normalize_ipaddr == nname_3_ipv6_4 | normalize_ipaddr
+
+####################################################################
+####################################################################
+####################################################################
+
+- name: Delete networks
+ docker_network:
+ name: "{{ network_name }}"
+ state: absent
+ force: true
+ loop:
+ - "{{ nname_1 }}"
+ - "{{ nname_2 }}"
+ - "{{ nname_3 }}"
+ loop_control:
+ loop_var: network_name
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/options.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/options.yml
new file mode 100644
index 00000000..1254fb52
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/options.yml
@@ -0,0 +1,4696 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-options' }}"
+ cname_h1: "{{ cname_prefix ~ '-options-h1' }}"
+ cname_h2: "{{ cname_prefix ~ '-options-h2' }}"
+ cname_h3: "{{ cname_prefix ~ '-options-h3' }}"
+- name: Registering container names for cleanup
+ set_fact:
+ cnames: "{{ cnames + [cname, cname_h1, cname_h2, cname_h3] }}"
+
+####################################################################
+## auto_remove #####################################################
+####################################################################
+
+- name: auto_remove
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "echo"'
+ name: "{{ cname }}"
+ state: started
+ auto_remove: true
+ register: auto_remove_1
+
+- name: Give container 1 second to be sure it terminated
+ pause:
+ seconds: 1
+
+- name: auto_remove (verify)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: auto_remove_2
+
+- assert:
+ that:
+ - auto_remove_1 is changed
+ - auto_remove_2 is not changed
+
+####################################################################
+## blkio_weight ####################################################
+####################################################################
+
+- name: blkio_weight
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ blkio_weight: 123
+ register: blkio_weight_1
+ ignore_errors: true
+
+- name: blkio_weight (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ blkio_weight: 123
+ register: blkio_weight_2
+ ignore_errors: true
+
+- name: blkio_weight (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ blkio_weight: 234
+ force_kill: true
+ register: blkio_weight_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- when: blkio_weight_1 is failed
+ assert:
+ that:
+ - "'setting cgroup config for procHooks process caused: failed to write' in blkio_weight_1.msg"
+
+- when: blkio_weight_1 is not failed
+ assert:
+ that:
+ - blkio_weight_1 is changed
+ - blkio_weight_2 is not failed
+ - "blkio_weight_2 is not changed or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.' in (blkio_weight_2.warnings | default([]))"
+ - blkio_weight_3 is not failed
+ - blkio_weight_3 is changed
+
+####################################################################
+## cap_drop, capabilities ##########################################
+####################################################################
+
+- name: capabilities, cap_drop
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+ register: capabilities_1
+
+- name: capabilities, cap_drop (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+ register: capabilities_2
+
+- name: capabilities, cap_drop (less)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ capabilities: []
+ cap_drop:
+ - all
+ register: capabilities_3
+
+- name: capabilities, cap_drop (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ capabilities:
+ - setgid
+ cap_drop:
+ - all
+ force_kill: true
+ register: capabilities_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - capabilities_1 is changed
+ - capabilities_2 is not changed
+ - capabilities_3 is not changed
+ - capabilities_4 is changed
+
+####################################################################
+## cgroupns_mode ###################################################
+####################################################################
+
+- name: cgroupns_mode
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ cgroupns_mode: host
+ register: cgroupns_mode_1
+ ignore_errors: true
+
+- name: cgroupns_mode (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ cgroupns_mode: host
+ register: cgroupns_mode_2
+ ignore_errors: true
+
+- name: cgroupns_mode (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ cgroupns_mode: private
+ register: cgroupns_mode_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - cgroupns_mode_1 is changed
+ - cgroupns_mode_2 is not changed and cgroupns_mode_2 is not failed
+ - >-
+ cgroupns_mode_3 is changed or
+ 'Docker warning: Your kernel does not support cgroup namespaces. Cgroup namespace setting discarded.' in (cgroupns_mode_3.warnings | default([])) or
+ (cgroupns_mode_3 is failed and 'error mounting "cgroup" to rootfs at "/sys/fs/cgroup"' in cgroupns_mode_3.msg)
+ when: docker_api_version is version('1.41', '>=') and cgroupns_mode_1 is not failed
+- assert:
+ that:
+ - >-
+ 'error mounting "cgroup" to rootfs at "/sys/fs/cgroup"' in cgroupns_mode_1.msg
+ when: docker_api_version is version('1.41', '>=') and cgroupns_mode_1 is failed
+- assert:
+ that:
+ - cgroupns_mode_1 is failed
+ - |
+ ('API version is ' ~ docker_api_version ~ '.') in cgroupns_mode_1.msg and 'Minimum version required is 1.41 ' in cgroupns_mode_1.msg
+ when: docker_api_version is version('1.41', '<')
+
+####################################################################
+## cgroup_parent ###################################################
+####################################################################
+
+- name: cgroup_parent
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ cgroup_parent: ''
+ register: cgroup_parent_1
+
+- name: cgroup_parent (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ cgroup_parent: ''
+ register: cgroup_parent_2
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - cgroup_parent_1 is changed
+ - cgroup_parent_2 is not changed
+
+####################################################################
+## command #########################################################
+####################################################################
+
+# old
+
+- name: command (compatibility)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: compatibility
+ command: '/bin/sh -v -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: command_1
+
+- name: command (compatibility, idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: compatibility
+ command: '/bin/sh -v -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: command_2
+
+- name: command (compatibility, idempotency, list)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: compatibility
+ command:
+ - /bin/sh
+ - '-v'
+ - '-c'
+ - '"sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: command_3
+
+- name: command (compatibility, fewer parameters)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: compatibility
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: command_4
+
+- name: command (compatibility, empty list)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: compatibility
+ command: []
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: command_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - command_1 is changed
+ - command_2 is not changed
+ - command_3 is not changed
+ - command_4 is changed
+ - command_5 is not changed
+
+# new
+
+- name: command (correct)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: correct
+ command: '/bin/sh -v -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: command_1
+
+- name: command (correct, idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: correct
+ command: '/bin/sh -v -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: command_2
+
+- name: command (correct, idempotency, list)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: correct
+ command:
+ - /bin/sh
+ - '-v'
+ - '-c'
+ - sleep 10m
+ name: "{{ cname }}"
+ state: started
+ register: command_3
+
+- name: command (correct, fewer parameters)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: correct
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: command_4
+
+- name: command (correct, empty list)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: correct
+ command: []
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: command_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - command_1 is changed
+ - command_2 is not changed
+ - command_3 is not changed
+ - command_4 is changed
+ - command_5 is changed
+
+####################################################################
+## cpu_period ######################################################
+####################################################################
+
+- name: cpu_period
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_period: 90000
+ state: started
+ register: cpu_period_1
+
+- name: cpu_period (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_period: 90000
+ state: started
+ register: cpu_period_2
+
+- name: cpu_period (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_period: 50000
+ state: started
+ force_kill: true
+ register: cpu_period_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - cpu_period_1 is changed
+ - cpu_period_2 is not changed
+ - cpu_period_3 is changed
+
+####################################################################
+## cpu_quota #######################################################
+####################################################################
+
+- name: cpu_quota
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_quota: 150000
+ state: started
+ register: cpu_quota_1
+
+- name: cpu_quota (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_quota: 150000
+ state: started
+ register: cpu_quota_2
+
+- name: cpu_quota (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_quota: 50000
+ state: started
+ force_kill: true
+ register: cpu_quota_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - cpu_quota_1 is changed
+ - cpu_quota_2 is not changed
+ - cpu_quota_3 is changed
+
+####################################################################
+## cpu_shares ######################################################
+####################################################################
+
+- name: cpu_shares
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_shares: 900
+ state: started
+ register: cpu_shares_1
+
+- name: cpu_shares (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_shares: 900
+ state: started
+ register: cpu_shares_2
+
+- name: cpu_shares (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpu_shares: 1100
+ state: started
+ force_kill: true
+ register: cpu_shares_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - cpu_shares_1 is changed
+ - cpu_shares_2 is not changed
+ - cpu_shares_3 is changed
+
+####################################################################
+## cpuset_cpus #####################################################
+####################################################################
+
+- name: cpuset_cpus
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_cpus: "0"
+ state: started
+ register: cpuset_cpus_1
+
+- name: cpuset_cpus (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_cpus: "0"
+ state: started
+ register: cpuset_cpus_2
+
+- name: cpuset_cpus (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_cpus: "1"
+ state: started
+ force_kill: true
+ # This will fail if the system the test is run on doesn't have
+ # multiple CPUs/cores available.
+ ignore_errors: true
+ register: cpuset_cpus_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - cpuset_cpus_1 is changed
+ - cpuset_cpus_2 is not changed
+ - cpuset_cpus_3 is failed or cpuset_cpus_3 is changed
+
+####################################################################
+## cpuset_mems #####################################################
+####################################################################
+
+- name: cpuset_mems
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_mems: "0"
+ state: started
+ register: cpuset_mems_1
+
+- name: cpuset_mems (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_mems: "0"
+ state: started
+ register: cpuset_mems_2
+
+- name: cpuset_mems (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpuset_mems: "1"
+ state: started
+ force_kill: true
+ # This will fail if the system the test is run on doesn't have
+ # multiple MEMs available.
+ ignore_errors: true
+ register: cpuset_mems_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - cpuset_mems_1 is changed
+ - cpuset_mems_2 is not changed
+ - cpuset_mems_3 is failed or cpuset_mems_3 is changed
+
+####################################################################
+## cpus ############################################################
+####################################################################
+
+- name: cpus
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpus: 1
+ state: started
+ register: cpus_1
+
+- name: cpus (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpus: 1
+ state: started
+ register: cpus_2
+
+- name: cpus (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ cpus: 1.5
+ state: started
+ force_kill: true
+    # This will fail if the system the test is run on doesn't have multiple
+    # CPUs/cores available. NOTE(review): no ignore_errors here, yet the assert below tolerates failure — confirm.
+ register: cpus_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - cpus_1 is changed
+ - cpus_2 is not changed and cpus_2 is not failed
+ - cpus_3 is failed or cpus_3 is changed
+
+####################################################################
+## debug ###########################################################
+####################################################################
+
+- name: debug (create)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ debug: true
+ register: debug_1
+
+- name: debug (start)
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ debug: true
+ register: debug_2
+
+- name: debug (stop)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: stopped
+ force_kill: true
+ debug: true
+ register: debug_3
+
+- name: debug (absent)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ debug: true
+ force_kill: true
+ register: debug_4
+
+- assert:
+ that:
+ - debug_1 is changed
+ - debug_2 is changed
+ - debug_3 is changed
+ - debug_4 is changed
+
+####################################################################
+## detach, cleanup #################################################
+####################################################################
+
+- name: detach without cleanup
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_hello_world }}"
+ detach: false
+ register: detach_no_cleanup
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: detach_no_cleanup_cleanup
+ diff: false
+
+- name: detach with cleanup
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_hello_world }}"
+ detach: false
+ cleanup: true
+ register: detach_cleanup
+
+- name: cleanup (unnecessary)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: detach_cleanup_cleanup
+ diff: false
+
+- name: detach with auto_remove and cleanup
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_hello_world }}"
+ detach: false
+ auto_remove: true
+ cleanup: true
+ register: detach_auto_remove
+ ignore_errors: true
+
+- name: cleanup (unnecessary)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: detach_auto_remove_cleanup
+ diff: false
+
+- name: detach with cleanup and non-zero status
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "exit 42"'
+ detach: false
+ cleanup: true
+ register: detach_cleanup_nonzero
+ ignore_errors: true
+
+- assert:
+ that:
+ # NOTE that 'Output' sometimes fails to contain the correct output
+ # of hello-world. We don't know why this happens, but it happens
+ # often enough to be annoying. That's why we disable this for now,
+ # and simply test that 'Output' is contained in the result.
+ - "'Output' in detach_no_cleanup.container"
+ - detach_no_cleanup.status == 0
+ # - "'Hello from Docker!' in detach_no_cleanup.container.Output"
+ - detach_no_cleanup_cleanup is changed
+ - "'Output' in detach_cleanup.container"
+ - detach_cleanup.status == 0
+ # - "'Hello from Docker!' in detach_cleanup.container.Output"
+ - detach_cleanup_cleanup is not changed
+ - detach_cleanup_nonzero is failed
+ - detach_cleanup_nonzero.status == 42
+ - "'Output' in detach_cleanup_nonzero.container"
+ - "detach_cleanup_nonzero.container.Output == ''"
+ - "'Cannot retrieve result as auto_remove is enabled' == detach_auto_remove.container.Output"
+ - detach_auto_remove_cleanup is not changed
+
+####################################################################
+## devices #########################################################
+####################################################################
+
+- name: devices
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/urandom:/dev/virt-urandom:rwm"
+ register: devices_1
+
+- name: devices (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/urandom:/dev/virt-urandom:rwm"
+ - "/dev/random:/dev/virt-random:rwm"
+ register: devices_2
+
+- name: devices (less)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ register: devices_3
+
+- name: devices (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ devices:
+ - "/dev/random:/dev/virt-random:rwm"
+ - "/dev/null:/dev/virt-null:rwm"
+ force_kill: true
+ register: devices_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - devices_1 is changed
+ - devices_2 is not changed
+ - devices_3 is not changed
+ - devices_4 is changed
+
+####################################################################
+## device_read_bps #################################################
+####################################################################
+
+- name: device_read_bps
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_bps:
+ - path: /dev/random
+ rate: 20M
+ - path: /dev/urandom
+ rate: 10K
+ register: device_read_bps_1
+ ignore_errors: true
+
+- name: device_read_bps (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_bps:
+ - path: /dev/urandom
+ rate: 10K
+ - path: /dev/random
+ rate: 20M
+ register: device_read_bps_2
+ ignore_errors: true
+
+- name: device_read_bps (less)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_bps:
+ - path: /dev/random
+ rate: 20M
+ register: device_read_bps_3
+ ignore_errors: true
+
+- name: device_read_bps (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_bps:
+ - path: /dev/random
+ rate: 10M
+ - path: /dev/urandom
+ rate: 5K
+ force_kill: true
+ register: device_read_bps_4
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- when: device_read_bps_1 is not failed
+ assert:
+ that:
+ - device_read_bps_1 is not failed
+ - device_read_bps_1 is changed
+ - device_read_bps_2 is not failed
+ - device_read_bps_2 is not changed
+ - device_read_bps_3 is not failed
+ - device_read_bps_3 is not changed
+ - device_read_bps_4 is not failed
+ - device_read_bps_4 is changed
+
+- when: device_read_bps_1 is failed
+ assert:
+ that:
+ - "'error setting cgroup config for procHooks process' in device_read_bps_1.msg and 'blkio.throttle.read_bps_device: no such device' in device_read_bps_1.msg"
+
+####################################################################
+## device_read_iops ################################################
+####################################################################
+
+- name: device_read_iops
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_iops:
+ - path: /dev/random
+ rate: 10
+ - path: /dev/urandom
+ rate: 20
+ register: device_read_iops_1
+ ignore_errors: true
+
+- name: device_read_iops (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_iops:
+ - path: /dev/urandom
+ rate: "20"
+ - path: /dev/random
+ rate: 10
+ register: device_read_iops_2
+ ignore_errors: true
+
+- name: device_read_iops (less)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_iops:
+ - path: /dev/random
+ rate: 10
+ register: device_read_iops_3
+ ignore_errors: true
+
+- name: device_read_iops (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_read_iops:
+ - path: /dev/random
+ rate: 30
+ - path: /dev/urandom
+ rate: 50
+ force_kill: true
+ register: device_read_iops_4
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- when: device_read_iops_1 is not failed
+ assert:
+ that:
+ - device_read_iops_1 is not failed
+ - device_read_iops_1 is changed
+ - device_read_iops_2 is not failed
+ - device_read_iops_2 is not changed
+ - device_read_iops_3 is not failed
+ - device_read_iops_3 is not changed
+ - device_read_iops_4 is not failed
+ - device_read_iops_4 is changed
+
+- when: device_read_iops_1 is failed
+ assert:
+ that:
+ - "'error setting cgroup config for procHooks process' in device_read_iops_1.msg and 'blkio.throttle.read_iops_device: no such device' in device_read_iops_1.msg"
+
+####################################################################
+## device_write_bps and device_write_iops ##########################
+####################################################################
+
+- name: device_write_bps and device_write_iops
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_write_bps:
+ - path: /dev/random
+ rate: 10M
+ device_write_iops:
+ - path: /dev/urandom
+ rate: 30
+ register: device_write_limit_1
+ ignore_errors: true
+
+- name: device_write_bps and device_write_iops (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_write_bps:
+ - path: /dev/random
+ rate: 10M
+ device_write_iops:
+ - path: /dev/urandom
+ rate: 30
+ register: device_write_limit_2
+ ignore_errors: true
+
+- name: device_write_bps device_write_iops (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_write_bps:
+ - path: /dev/random
+ rate: 20K
+ device_write_iops:
+ - path: /dev/urandom
+ rate: 100
+ force_kill: true
+ register: device_write_limit_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- when: device_write_limit_1 is not failed
+ assert:
+ that:
+ - device_write_limit_1 is not failed and device_write_limit_2 is not failed and device_write_limit_3 is not failed
+ - device_write_limit_1 is changed
+ - device_write_limit_2 is not changed
+ - device_write_limit_3 is changed
+
+- when: device_write_limit_1 is failed
+ assert:
+ that:
+ - "'error setting cgroup config for procHooks process' in device_write_limit_1.msg and 'blkio.throttle.write_bps_device: no such device' in device_write_limit_1.msg"
+
+####################################################################
+## device_requests #################################################
+####################################################################
+
+- name: device_requests
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_requests: []
+ register: device_requests_1
+ ignore_errors: true
+
+- name: device_requests (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ device_requests: []
+ register: device_requests_2
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - device_requests_1 is changed
+ - device_requests_2 is not changed
+ when: docker_api_version is version('1.40', '>=')
+- assert:
+ that:
+ - device_requests_1 is failed
+ - |
+ ('API version is ' ~ docker_api_version ~ '.') in device_requests_1.msg and 'Minimum version required is 1.40 ' in device_requests_1.msg
+ when: docker_api_version is version('1.40', '<')
+
+####################################################################
+## dns_opts ########################################################
+####################################################################
+
+- name: dns_opts
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_opts:
+ - "timeout:10"
+ - rotate
+ register: dns_opts_1
+
+- name: dns_opts (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_opts:
+ - rotate
+ - "timeout:10"
+ register: dns_opts_2
+
+- name: dns_opts (less resolv.conf options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_opts:
+ - "timeout:10"
+ register: dns_opts_3
+
+- name: dns_opts (more resolv.conf options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_opts:
+ - "timeout:10"
+ - no-check-names
+ force_kill: true
+ register: dns_opts_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - dns_opts_1 is changed
+ - dns_opts_2 is not changed
+ - dns_opts_3 is not changed
+ - dns_opts_4 is changed
+
+####################################################################
+## dns_search_domains ##############################################
+####################################################################
+
+- name: dns_search_domains
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_search_domains:
+ - example.com
+ - example.org
+ register: dns_search_domains_1
+
+- name: dns_search_domains (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_search_domains:
+ - example.com
+ - example.org
+ register: dns_search_domains_2
+
+- name: dns_search_domains (different order)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_search_domains:
+ - example.org
+ - example.com
+ force_kill: true
+ register: dns_search_domains_3
+
+- name: dns_search_domains (changed elements)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_search_domains:
+ - ansible.com
+ - example.com
+ force_kill: true
+ register: dns_search_domains_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - dns_search_domains_1 is changed
+ - dns_search_domains_2 is not changed
+ - dns_search_domains_3 is changed
+ - dns_search_domains_4 is changed
+
+####################################################################
+## dns_servers #####################################################
+####################################################################
+
+- name: dns_servers
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 1.1.1.1
+ - 8.8.8.8
+ register: dns_servers_1
+
+- name: dns_servers (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 1.1.1.1
+ - 8.8.8.8
+ register: dns_servers_2
+
+- name: dns_servers (changed order)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 8.8.8.8
+ - 1.1.1.1
+ force_kill: true
+ register: dns_servers_3
+
+- name: dns_servers (changed elements)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ dns_servers:
+ - 8.8.8.8
+ - 9.9.9.9
+ force_kill: true
+ register: dns_servers_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - dns_servers_1 is changed
+ - dns_servers_2 is not changed
+ - dns_servers_3 is changed
+ - dns_servers_4 is changed
+
+####################################################################
+## domainname ######################################################
+####################################################################
+
+# Pattern: create the container, re-run with identical parameters
+# (must report "not changed"), then change the option value (must
+# recreate; force_kill allows killing the running container), then
+# remove the container and assert the recorded change states.
+
+- name: domainname
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    domainname: example.com
+    state: started
+  register: domainname_1
+
+- name: domainname (idempotency)
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    domainname: example.com
+    state: started
+  register: domainname_2
+
+- name: domainname (change)
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    domainname: example.org
+    state: started
+    force_kill: true
+  register: domainname_3
+
+- name: cleanup
+  docker_container:
+    name: "{{ cname }}"
+    state: absent
+    force_kill: true
+  diff: false
+
+- assert:
+    that:
+      - domainname_1 is changed
+      - domainname_2 is not changed
+      - domainname_3 is changed
+
+####################################################################
+## entrypoint ######################################################
+####################################################################
+
+# Old
+
+- name: entrypoint (compatibility)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: compatibility
+ entrypoint:
+ - /bin/sh
+ - "-v"
+ - "-c"
+ - "'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ register: entrypoint_1
+
+- name: entrypoint (compatibility, idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: compatibility
+ entrypoint:
+ - /bin/sh
+ - "-v"
+ - "-c"
+ - "'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ register: entrypoint_2
+
+- name: entrypoint (compatibility, change order, should not be idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: compatibility
+ entrypoint:
+ - /bin/sh
+ - "-c"
+ - "'sleep 10m'"
+ - "-v"
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: entrypoint_3
+
+- name: entrypoint (compatibility, fewer parameters)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: compatibility
+ entrypoint:
+ - /bin/sh
+ - "-c"
+ - "'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: entrypoint_4
+
+- name: entrypoint (compatibility, other parameters)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: compatibility
+ entrypoint:
+ - /bin/sh
+ - "-c"
+ - "'sleep 5m'"
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: entrypoint_5
+
+- name: entrypoint (compatibility, force empty)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: compatibility
+ entrypoint: []
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: entrypoint_6
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - entrypoint_1 is changed
+ - entrypoint_2 is not changed
+ - entrypoint_3 is changed
+ - entrypoint_4 is changed
+ - entrypoint_5 is changed
+ - entrypoint_6 is not changed
+
+# New
+
+- name: entrypoint (correct)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: correct
+ entrypoint:
+ - /bin/sh
+ - "-v"
+ - "-c"
+ - "sleep 10m"
+ name: "{{ cname }}"
+ state: started
+ register: entrypoint_1
+
+- name: entrypoint (correct, idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: correct
+ entrypoint:
+ - /bin/sh
+ - "-v"
+ - "-c"
+ - "sleep 10m"
+ name: "{{ cname }}"
+ state: started
+ register: entrypoint_2
+
+- name: entrypoint (correct, change order, should not be idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: correct
+ entrypoint:
+ - /bin/sh
+ - "-c"
+ - "sleep 10m"
+ - "-v"
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: entrypoint_3
+
+- name: entrypoint (correct, fewer parameters)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: correct
+ entrypoint:
+ - /bin/sh
+ - "-c"
+ - "sleep 10m"
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: entrypoint_4
+
+- name: entrypoint (correct, other parameters)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: correct
+ entrypoint:
+ - /bin/sh
+ - "-c"
+ - "sleep 5m"
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: entrypoint_5
+
+- name: entrypoint (correct, force empty)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command_handling: correct
+ entrypoint: []
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: entrypoint_6
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - entrypoint_1 is changed
+ - entrypoint_2 is not changed
+ - entrypoint_3 is changed
+ - entrypoint_4 is changed
+ - entrypoint_5 is changed
+ - entrypoint_6 is changed
+
+####################################################################
+## env #############################################################
+####################################################################
+
+- name: env
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env:
+ TEST1: val1
+ TEST2: val2
+ TEST3: "False"
+ TEST4: "true"
+ TEST5: "yes"
+ register: env_1
+
+- name: env (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env:
+ TEST2: val2
+ TEST1: val1
+ TEST5: "yes"
+ TEST3: "False"
+ TEST4: "true"
+ register: env_2
+
+- name: env (less environment variables)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env:
+ TEST1: val1
+ register: env_3
+
+- name: env (more environment variables)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env:
+ TEST1: val1
+ TEST3: val3
+ force_kill: true
+ register: env_4
+
+- name: env (fail unwrapped values)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env:
+ TEST1: true
+ force_kill: true
+ register: env_5
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - env_1 is changed
+ - "'TEST1=val1' in env_1.container.Config.Env"
+ - "'TEST2=val2' in env_1.container.Config.Env"
+ - "'TEST3=False' in env_1.container.Config.Env"
+ - "'TEST4=true' in env_1.container.Config.Env"
+ - "'TEST5=yes' in env_1.container.Config.Env"
+ - env_2 is not changed
+ - env_3 is not changed
+ - "'TEST1=val1' in env_4.container.Config.Env"
+ - "'TEST2=val2' not in env_4.container.Config.Env"
+ - "'TEST3=val3' in env_4.container.Config.Env"
+ - env_4 is changed
+ - env_5 is failed
+ - "('Non-string value found for env option.') in env_5.msg"
+
+####################################################################
+## env_file #########################################################
+####################################################################
+
+- name: Copy env-file
+ copy:
+ src: env-file
+ dest: "{{ remote_tmp_dir }}/env-file"
+
+- name: env_file
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env_file: "{{ remote_tmp_dir }}/env-file"
+ register: env_file_1
+
+- name: env_file (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env_file: "{{ remote_tmp_dir }}/env-file"
+ register: env_file_2
+
+- name: env_file (with env, idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env_file: "{{ remote_tmp_dir }}/env-file"
+ env:
+ TEST3: val3
+ register: env_file_3
+
+- name: env_file (with env)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env_file: "{{ remote_tmp_dir }}/env-file"
+ env:
+ TEST1: val1
+ TEST3: val3
+ force_kill: true
+ register: env_file_4
+
+- name: env_file (with subset env, idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env_file: "{{ remote_tmp_dir }}/env-file"
+ env:
+ TEST1: val1
+ register: env_file_5
+
+- name: env_file (with env, override)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ env_file: "{{ remote_tmp_dir }}/env-file"
+ env:
+ TEST2: val2
+ TEST4: val4alt
+ force_kill: true
+ register: env_file_6
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - env_file_1 is changed
+ - "'TEST3=val3' in env_file_1.container.Config.Env"
+ - "'TEST4=val4' in env_file_1.container.Config.Env"
+ - env_file_2 is not changed
+ - env_file_3 is not changed
+ - env_file_4 is changed
+ - "'TEST1=val1' in env_file_4.container.Config.Env"
+ - "'TEST3=val3' in env_file_4.container.Config.Env"
+ - "'TEST4=val4' in env_file_4.container.Config.Env"
+ - env_file_5 is not changed
+ - env_file_6 is changed
+ - "'TEST2=val2' in env_file_6.container.Config.Env"
+ - "'TEST3=val3' in env_file_6.container.Config.Env"
+ - "'TEST4=val4alt' in env_file_6.container.Config.Env"
+
+####################################################################
+## etc_hosts #######################################################
+####################################################################
+
+# Dict-valued option tests: create, re-run with the same entries in a
+# different order (must be idempotent), re-run with a subset of the
+# entries (per the asserts below this is also "not changed"), then add
+# a previously unseen entry (must recreate the container).
+
+- name: etc_hosts
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    state: started
+    etc_hosts:
+      example.com: 1.2.3.4
+      example.org: 4.3.2.1
+  register: etc_hosts_1
+
+- name: etc_hosts (idempotency)
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    state: started
+    etc_hosts:
+      example.org: 4.3.2.1
+      example.com: 1.2.3.4
+  register: etc_hosts_2
+
+- name: etc_hosts (less hosts)
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    state: started
+    etc_hosts:
+      example.com: 1.2.3.4
+  register: etc_hosts_3
+
+- name: etc_hosts (more hosts)
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    state: started
+    etc_hosts:
+      example.com: 1.2.3.4
+      example.us: 1.2.3.5
+    force_kill: true
+  register: etc_hosts_4
+
+- name: cleanup
+  docker_container:
+    name: "{{ cname }}"
+    state: absent
+    force_kill: true
+  diff: false
+
+- assert:
+    that:
+      - etc_hosts_1 is changed
+      - etc_hosts_2 is not changed
+      - etc_hosts_3 is not changed
+      - etc_hosts_4 is changed
+
+####################################################################
+## exposed_ports ###################################################
+####################################################################
+
+- name: exposed_ports
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9002"
+ register: exposed_ports_1
+
+- name: exposed_ports (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9002"
+ - "9001"
+ register: exposed_ports_2
+
+- name: exposed_ports (less ports)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9002"
+ register: exposed_ports_3
+
+- name: exposed_ports (more ports)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9002"
+ - "9003"
+ force_kill: true
+ register: exposed_ports_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - exposed_ports_1 is changed
+ - exposed_ports_2 is not changed
+ - exposed_ports_3 is not changed
+ - exposed_ports_4 is changed
+
+####################################################################
+## force_kill ######################################################
+####################################################################
+
+# TODO: - force_kill
+
+####################################################################
+## groups ##########################################################
+####################################################################
+
+- name: groups
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1234"
+ - "5678"
+ register: groups_1
+
+- name: groups (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "5678"
+ - "1234"
+ register: groups_2
+
+- name: groups (less groups)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1234"
+ register: groups_3
+
+- name: groups (more groups)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ groups:
+ - "1234"
+ - "2345"
+ force_kill: true
+ register: groups_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - groups_1 is changed
+ - groups_2 is not changed
+ - groups_3 is not changed
+ - groups_4 is changed
+
+####################################################################
+## healthcheck #####################################################
+####################################################################
+
+- name: healthcheck
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - 1
+ timeout: 2s
+ interval: 0h0m2s3ms4us
+ retries: 2
+ force_kill: true
+ register: healthcheck_1
+
+- name: healthcheck (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - 1
+ timeout: 2s
+ interval: 0h0m2s3ms4us
+ retries: 2
+ force_kill: true
+ register: healthcheck_2
+
+- name: healthcheck (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - 1
+ timeout: 3s
+ interval: 0h1m2s3ms4us
+ retries: 3
+ force_kill: true
+ register: healthcheck_3
+
+- name: healthcheck (no change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: healthcheck_4
+
+- name: healthcheck (disabled)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test:
+ - NONE
+ force_kill: true
+ register: healthcheck_5
+
+- name: healthcheck (disabled, idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test:
+ - NONE
+ force_kill: true
+ register: healthcheck_6
+
+- name: healthcheck (disabled, idempotency, strict)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test:
+ - NONE
+ force_kill: true
+ comparisons:
+ '*': strict
+ register: healthcheck_7
+
+- name: healthcheck (string in healthcheck test, changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test: "sleep 1"
+ force_kill: true
+ register: healthcheck_8
+
+- name: healthcheck (string in healthcheck test, idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ healthcheck:
+ test: "sleep 1"
+ force_kill: true
+ register: healthcheck_9
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - healthcheck_1 is changed
+ - healthcheck_2 is not changed
+ - healthcheck_3 is changed
+ - healthcheck_4 is not changed
+ - healthcheck_5 is changed
+ - healthcheck_6 is not changed
+ - healthcheck_7 is not changed
+ - healthcheck_8 is changed
+ - healthcheck_9 is not changed
+
+####################################################################
+## hostname ########################################################
+####################################################################
+
+# Pattern: create, re-run unchanged (must be idempotent), change the
+# hostname (must recreate; force_kill allows killing the running
+# container), clean up, assert change states.
+
+- name: hostname
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    hostname: me.example.com
+    state: started
+  register: hostname_1
+
+- name: hostname (idempotency)
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    hostname: me.example.com
+    state: started
+  register: hostname_2
+
+- name: hostname (change)
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    hostname: me.example.org
+    state: started
+    force_kill: true
+  register: hostname_3
+
+- name: cleanup
+  docker_container:
+    name: "{{ cname }}"
+    state: absent
+    force_kill: true
+  diff: false
+
+- assert:
+    that:
+      - hostname_1 is changed
+      - hostname_2 is not changed
+      - hostname_3 is changed
+
+####################################################################
+## init ############################################################
+####################################################################
+
+# Boolean option pattern: create with init enabled, re-run unchanged
+# (must be idempotent), flip the flag (must recreate; force_kill
+# allows killing the running container), clean up, assert.
+
+- name: init
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    init: true
+    state: started
+  register: init_1
+
+- name: init (idempotency)
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    init: true
+    state: started
+  register: init_2
+
+- name: init (change)
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    init: false
+    state: started
+    force_kill: true
+  register: init_3
+
+- name: cleanup
+  docker_container:
+    name: "{{ cname }}"
+    state: absent
+    force_kill: true
+  diff: false
+
+- assert:
+    that:
+      - init_1 is changed
+      - init_2 is not changed
+      - init_3 is changed
+
+####################################################################
+## interactive #####################################################
+####################################################################
+
+# Boolean option pattern: create with interactive enabled, re-run
+# unchanged (must be idempotent), flip the flag (must recreate;
+# force_kill allows killing the running container), clean up, assert.
+
+- name: interactive
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    interactive: true
+    state: started
+  register: interactive_1
+
+- name: interactive (idempotency)
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    interactive: true
+    state: started
+  register: interactive_2
+
+- name: interactive (change)
+  docker_container:
+    image: "{{ docker_test_image_alpine }}"
+    command: '/bin/sh -c "sleep 10m"'
+    name: "{{ cname }}"
+    interactive: false
+    state: started
+    force_kill: true
+  register: interactive_3
+
+- name: cleanup
+  docker_container:
+    name: "{{ cname }}"
+    state: absent
+    force_kill: true
+  diff: false
+
+- assert:
+    that:
+      - interactive_1 is changed
+      - interactive_2 is not changed
+      - interactive_3 is changed
+
+####################################################################
+## image / image_comparison / ignore_image #########################
+####################################################################
+
+- name: Pull images to make sure ignore_image test succeeds
+ # If the image isn't there, it will pull it and return 'changed'.
+ docker_image:
+ name: "{{ item }}"
+ source: pull
+ loop:
+ - "{{ docker_test_image_hello_world }}"
+ - "{{ docker_test_image_registry_nginx }}"
+
+- name: image
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: image_1
+
+- name: image (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: image_2
+ diff: true
+
+- name: ignore_image
+ docker_container:
+ image: "{{ docker_test_image_hello_world }}"
+ comparisons:
+ image: ignore
+ name: "{{ cname }}"
+ state: started
+ register: ignore_image
+ diff: true
+
+- name: ignore_image (labels and env differ in image, image_comparison=current-image)
+ docker_container:
+ image: "{{ docker_test_image_registry_nginx }}"
+ comparisons:
+ image: ignore
+ image_comparison: current-image
+ name: "{{ cname }}"
+ state: started
+ register: ignore_image_2
+ diff: true
+
+- name: ignore_image (labels and env differ in image, image_comparison=desired-image)
+ docker_container:
+ image: "{{ docker_test_image_registry_nginx }}"
+ comparisons:
+ image: ignore
+ image_comparison: desired-image
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: ignore_image_3
+ diff: true
+
+- name: image change
+ docker_container:
+ image: "{{ docker_test_image_hello_world }}"
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: image_change
+ diff: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - image_1 is changed
+ - image_2 is not changed
+ - ignore_image is not changed
+ - ignore_image_2 is not changed
+ - ignore_image_3 is changed
+ - image_change is changed
+
+####################################################################
+## image_label_mismatch ############################################
+####################################################################
+
+- name: Registering image name
+ set_fact:
+ iname_labels: "{{ cname_prefix ~ '-labels' }}"
+- name: Registering image name
+ set_fact:
+ inames: "{{ inames + [iname_labels] }}"
+- name: build image with labels
+ command:
+ cmd: "docker build --label img_label=base --tag {{ iname_labels }} -"
+ stdin: "FROM {{ docker_test_image_alpine }}"
+
+- name: image_label_mismatch
+ docker_container:
+ image: "{{ iname_labels }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: image_label_mismatch_1
+
+- name: image_label_mismatch (ignore,unmanaged labels)
+ docker_container:
+ image: "{{ iname_labels }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_label_mismatch: ignore
+ state: started
+ register: image_label_mismatch_2
+
+- name: image_label_mismatch (ignore,missing img label)
+ docker_container:
+ image: "{{ iname_labels }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_label_mismatch: ignore
+ labels: {}
+ state: started
+ register: image_label_mismatch_3
+
+- name: image_label_mismatch (ignore,match img label)
+ docker_container:
+ image: "{{ iname_labels }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_label_mismatch: ignore
+ labels:
+ img_label: base
+ state: started
+ register: image_label_mismatch_4
+
+- name: image_label_mismatch (ignore,mismatched img label)
+ docker_container:
+ image: "{{ iname_labels }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_label_mismatch: ignore
+ labels:
+ img_label: override
+ state: started
+ force_kill: true
+ register: image_label_mismatch_5
+
+- name: image_label_mismatch (ignore,remove img label)
+ docker_container:
+ image: "{{ iname_labels }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_label_mismatch: ignore
+ labels: {}
+ state: started
+ force_kill: true
+ register: image_label_mismatch_6
+
+- name: image_label_mismatch (fail,unmanaged labels)
+ docker_container:
+ image: "{{ iname_labels }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_label_mismatch: fail
+ state: started
+ register: image_label_mismatch_7
+
+- name: image_label_mismatch (fail,non-strict,missing img label)
+ docker_container:
+ image: "{{ iname_labels }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_label_mismatch: fail
+ labels: {}
+ state: started
+ register: image_label_mismatch_8
+
+- name: image_label_mismatch (fail,strict,missing img label)
+ docker_container:
+ image: "{{ iname_labels }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_label_mismatch: fail
+ comparisons:
+ labels: strict
+ labels: {}
+ state: started
+ ignore_errors: true
+ register: image_label_mismatch_9
+
+- name: image_label_mismatch (fail,match img label)
+ docker_container:
+ image: "{{ iname_labels }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_label_mismatch: fail
+ labels:
+ img_label: base
+ state: started
+ register: image_label_mismatch_10
+
+- name: image_label_mismatch (fail,mismatched img label)
+ docker_container:
+ image: "{{ iname_labels }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_label_mismatch: fail
+ labels:
+ img_label: override
+ state: started
+ force_kill: true
+ register: image_label_mismatch_11
+
+- name: cleanup container
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- name: cleanup image
+ docker_image:
+ name: "{{ iname_labels }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - image_label_mismatch_1 is changed
+ - image_label_mismatch_1.container.Config.Labels.img_label == "base"
+ - image_label_mismatch_2 is not changed
+ - image_label_mismatch_3 is not changed
+ - image_label_mismatch_4 is not changed
+ - image_label_mismatch_5 is changed
+ - image_label_mismatch_5.container.Config.Labels.img_label == "override"
+ - image_label_mismatch_6 is changed
+ - image_label_mismatch_6.container.Config.Labels.img_label == "base"
+ - image_label_mismatch_7 is not changed
+ - image_label_mismatch_8 is not changed
+ - image_label_mismatch_9 is failed
+ - >-
+ image_label_mismatch_9.msg == ("Some labels should be removed but are present in the base image. You can set image_label_mismatch to 'ignore' to ignore this error. " ~ 'Labels: "img_label"')
+ - image_label_mismatch_10 is not changed
+ - image_label_mismatch_11 is changed
+
+####################################################################
+## ignore_image (deprecated option) ################################
+####################################################################
+
+- name: Pull images to make sure ignore_image test succeeds
+ # If the image isn't there, it will pull it and return 'changed'.
+ docker_image:
+ name: "{{ item }}"
+ source: pull
+ loop:
+ - "{{ docker_test_image_hello_world }}"
+ - "{{ docker_test_image_registry_nginx }}"
+
+- name: image
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: image_1
+
+- name: image (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: image_2
+ diff: true
+
+- name: ignore_image
+ docker_container:
+ image: "{{ docker_test_image_hello_world }}"
+ ignore_image: true
+ name: "{{ cname }}"
+ state: started
+ register: ignore_image
+ diff: true
+
+- name: ignore_image (labels and env differ in image, image_comparison=current-image)
+ docker_container:
+ image: "{{ docker_test_image_registry_nginx }}"
+ ignore_image: true
+ image_comparison: current-image
+ name: "{{ cname }}"
+ state: started
+ register: ignore_image_2
+ diff: true
+
+- name: ignore_image (labels and env differ in image, image_comparison=desired-image)
+ docker_container:
+ image: "{{ docker_test_image_registry_nginx }}"
+ ignore_image: true
+ image_comparison: desired-image
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: ignore_image_3
+ diff: true
+
+- name: image change
+ docker_container:
+ image: "{{ docker_test_image_hello_world }}"
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: image_change
+ diff: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - image_1 is changed
+ - image_2 is not changed
+ - ignore_image is not changed
+ - ignore_image_2 is not changed
+ - ignore_image_3 is changed
+ - image_change is changed
+
+####################################################################
+## image_name_mismatch #############################################
+####################################################################
+
+- name: Registering image name
+ set_fact:
+ iname_name_mismatch: "{{ cname_prefix ~ '-image-name' }}"
+- name: Appending image name to cleanup list
+ set_fact:
+ inames: "{{ inames + [iname_name_mismatch] }}"
+
+- name: Tag registry nginx image (pulled earlier) with new name
+ docker_image:
+ name: "{{ docker_test_image_registry_nginx }}"
+ source: local
+ repository: "{{ iname_name_mismatch }}:latest"
+
+- name: image_name_mismatch
+ docker_container:
+ image: "{{ docker_test_image_registry_nginx }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ register: image_name_mismatch_1
+
+- name: image_name_mismatch (ignore)
+ docker_container:
+ image: "{{ iname_name_mismatch }}:latest"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_name_mismatch: ignore
+ state: started
+ register: image_name_mismatch_2
+
+- name: image_name_mismatch (recreate)
+ docker_container:
+ image: "{{ iname_name_mismatch }}:latest"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ image_name_mismatch: recreate
+ state: started
+ force_kill: true
+ register: image_name_mismatch_3
+
+- name: Cleanup container
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- name: Cleanup image
+ docker_image:
+ name: "{{ iname_name_mismatch }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - image_name_mismatch_1 is changed
+ - image_name_mismatch_2 is not changed
+ - image_name_mismatch_3 is changed
+ - image_name_mismatch_3.container.Image == image_name_mismatch_2.container.Image
+
+####################################################################
+## ipc_mode ########################################################
+####################################################################
+
+- name: start helpers
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ container_name }}"
+ state: started
+ ipc_mode: shareable
+ loop:
+ - "{{ cname_h1 }}"
+ loop_control:
+ loop_var: container_name
+
+- name: ipc_mode
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ipc_mode: "container:{{ cname_h1 }}"
+    # (works because the helper container above was started with ipc_mode: shareable)
+ register: ipc_mode_1
+
+- name: ipc_mode (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ipc_mode: "container:{{ cname_h1 }}"
+    # (works because the helper container above was started with ipc_mode: shareable)
+ register: ipc_mode_2
+
+- name: ipc_mode (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ipc_mode: private
+ force_kill: true
+ register: ipc_mode_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: true
+ loop:
+ - "{{ cname }}"
+ - "{{ cname_h1 }}"
+ loop_control:
+ loop_var: container_name
+ diff: false
+
+- assert:
+ that:
+ - ipc_mode_1 is changed
+ - ipc_mode_2 is not changed
+ - ipc_mode_3 is changed
+
+####################################################################
+## kernel_memory ###################################################
+####################################################################
+
+- name: kernel_memory
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ kernel_memory: 8M
+ state: started
+ register: kernel_memory_1
+ ignore_errors: true
+
+- name: kernel_memory (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ kernel_memory: 8M
+ state: started
+ register: kernel_memory_2
+ ignore_errors: true
+
+- name: kernel_memory (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ kernel_memory: 6M
+ state: started
+ force_kill: true
+ register: kernel_memory_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - kernel_memory_1 is changed
+ - kernel_memory_2 is not changed
+ - kernel_memory_3 is changed
+ when:
+ - kernel_memory_1 is not failed or 'kernel memory accounting disabled in this runc build' not in kernel_memory_1.msg
+ - "'Docker warning: Specifying a kernel memory limit is deprecated and will be removed in a future release.' not in (kernel_memory_1.warnings | default([]))"
+ # API version 1.42 seems to remove the kernel memory option completely
+ - "'KernelMemory' in kernel_memory_1.container.HostConfig or docker_api_version is version('1.42', '<')"
+
+####################################################################
+## kill_signal #####################################################
+####################################################################
+
+# TODO: - kill_signal
+
+####################################################################
+## labels ##########################################################
+####################################################################
+
+- name: labels
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ register: labels_1
+
+- name: labels (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.2: world
+ ansible.test.1: hello
+ register: labels_2
+
+- name: labels (less labels)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ register: labels_3
+
+- name: labels (more labels)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ force_kill: true
+ register: labels_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - labels_1 is changed
+ - labels_2 is not changed
+ - labels_3 is not changed
+ - labels_4 is changed
+
+####################################################################
+## links ###########################################################
+####################################################################
+
+- name: start helpers
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ container_name }}"
+ state: started
+ loop:
+ - "{{ cname_h1 }}"
+ - "{{ cname_h2 }}"
+ - "{{ cname_h3 }}"
+ loop_control:
+ loop_var: container_name
+
+- name: links
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ links:
+ - "{{ cname_h1 }}:test1"
+ - "{{ cname_h2 }}:test2"
+ register: links_1
+
+- name: links (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ links:
+ - "{{ cname_h2 }}:test2"
+ - "{{ cname_h1 }}:test1"
+ register: links_2
+
+- name: links (less links)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ links:
+ - "{{ cname_h1 }}:test1"
+ register: links_3
+
+- name: links (more links)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ links:
+ - "{{ cname_h1 }}:test1"
+ - "{{ cname_h3 }}:test3"
+ force_kill: true
+ register: links_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: true
+ loop:
+ - "{{ cname }}"
+ - "{{ cname_h1 }}"
+ - "{{ cname_h2 }}"
+ - "{{ cname_h3 }}"
+ loop_control:
+ loop_var: container_name
+ diff: false
+
+- assert:
+ that:
+ - links_1 is changed
+ - links_2 is not changed
+ - links_3 is not changed
+ - links_4 is changed
+
+####################################################################
+## log_driver ######################################################
+####################################################################
+
+- name: log_driver
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ register: log_driver_1
+
+- name: log_driver (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ register: log_driver_2
+
+- name: log_driver (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: syslog
+ force_kill: true
+ register: log_driver_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - log_driver_1 is changed
+ - log_driver_2 is not changed
+ - log_driver_3 is changed
+
+####################################################################
+## log_options #####################################################
+####################################################################
+
+- name: log_options
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ log_options:
+ labels: production_status
+ env: os,customer
+ max-file: 5
+ register: log_options_1
+
+- name: log_options (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ log_options:
+ env: os,customer
+ labels: production_status
+ max-file: 5
+ register: log_options_2
+
+- name: log_options (less log options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ log_options:
+ labels: production_status
+ register: log_options_3
+
+- name: log_options (more log options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ log_driver: json-file
+ log_options:
+ labels: production_status
+ max-size: 10m
+ force_kill: true
+ register: log_options_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - log_options_1 is changed
+ - log_options_2 is not changed
+ - "'Non-string value found for log_options option \\'max-file\\'. The value is automatically converted to \\'5\\'. If this is not correct, or you want to
+avoid such warnings, please quote the value.' in (log_options_2.warnings | default([]))"
+ - log_options_3 is not changed
+ - log_options_4 is changed
+
+####################################################################
+## mac_address #####################################################
+####################################################################
+
+- name: mac_address
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ mac_address: 92:d0:c6:0a:29:33
+ state: started
+ register: mac_address_1
+
+- name: mac_address (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ mac_address: 92:d0:c6:0a:29:33
+ state: started
+ register: mac_address_2
+
+- name: mac_address (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ mac_address: 92:d0:c6:0a:29:44
+ state: started
+ force_kill: true
+ register: mac_address_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - mac_address_1 is changed
+ - mac_address_2 is not changed
+ - mac_address_3 is changed
+
+####################################################################
+## memory ##########################################################
+####################################################################
+
+- name: memory
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory: 64M
+ state: started
+ register: memory_1
+
+- name: memory (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory: 64M
+ state: started
+ register: memory_2
+
+- name: memory (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory: 48M
+ state: started
+ force_kill: true
+ register: memory_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - memory_1 is changed
+ - memory_2 is not changed
+ - memory_3 is changed
+
+####################################################################
+## memory_reservation ##############################################
+####################################################################
+
+- name: memory_reservation
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_reservation: 64M
+ state: started
+ register: memory_reservation_1
+
+- name: memory_reservation (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_reservation: 64M
+ state: started
+ register: memory_reservation_2
+
+- name: memory_reservation (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_reservation: 48M
+ state: started
+ force_kill: true
+ register: memory_reservation_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - memory_reservation_1 is changed
+ - memory_reservation_2 is not changed
+ - memory_reservation_3 is changed
+
+####################################################################
+## memory_swap #####################################################
+####################################################################
+
+- name: memory_swap
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ # Docker daemon does not accept memory_swap if memory is not specified
+ memory: 32M
+ memory_swap: 64M
+ state: started
+ debug: true
+ register: memory_swap_1
+
+- name: memory_swap (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ # Docker daemon does not accept memory_swap if memory is not specified
+ memory: 32M
+ memory_swap: 64M
+ state: started
+ debug: true
+ register: memory_swap_2
+
+- name: memory_swap (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ # Docker daemon does not accept memory_swap if memory is not specified
+ memory: 32M
+ memory_swap: 48M
+ state: started
+ force_kill: true
+ debug: true
+ register: memory_swap_3
+
+- name: memory_swap (unlimited)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ # Docker daemon does not accept memory_swap if memory is not specified
+ memory: 32M
+ memory_swap: unlimited
+ state: started
+ force_kill: true
+ debug: true
+ register: memory_swap_4
+
+- name: memory_swap (unlimited via -1)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ # Docker daemon does not accept memory_swap if memory is not specified
+ memory: 32M
+ memory_swap: -1
+ state: started
+ force_kill: true
+ debug: true
+ register: memory_swap_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - memory_swap_1 is changed
+ # Sometimes (in particular during integration tests, maybe when not running
+ # on a proper VM), memory_swap cannot be set and will be -1 afterwards.
+ - memory_swap_2 is not changed or memory_swap_2.container.HostConfig.MemorySwap == -1
+ - memory_swap_3 is changed
+ # Unlimited memory_swap (using 'unlimited') should be allowed
+ # (If the value was already -1 because of the above reasons, it won't change)
+ - (memory_swap_4 is changed or memory_swap_3.container.HostConfig.MemorySwap == -1) and memory_swap_4.container.HostConfig.MemorySwap == -1
+ # Unlimited memory_swap (using '-1') should be allowed
+ - memory_swap_5 is not changed and memory_swap_5.container.HostConfig.MemorySwap == -1
+
+- debug: var=memory_swap_1
+ when: memory_swap_2 is changed
+- debug: var=memory_swap_2
+ when: memory_swap_2 is changed
+- debug: var=memory_swap_3
+ when: memory_swap_2 is changed
+
+####################################################################
+## memory_swappiness ###############################################
+####################################################################
+
+- name: memory_swappiness
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_swappiness: 40
+ state: started
+ register: memory_swappiness_1
+
+- name: memory_swappiness (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_swappiness: 40
+ state: started
+ register: memory_swappiness_2
+
+- name: memory_swappiness (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ memory_swappiness: 60
+ state: started
+ force_kill: true
+ register: memory_swappiness_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - memory_swappiness_1 is changed
+ - memory_swappiness_2 is not changed
+ - memory_swappiness_3 is changed
+ when: "'Docker warning: Your kernel does not support memory swappiness capabilities or the cgroup is not mounted. Memory swappiness discarded.' not in (memory_swappiness_1.warnings | default([]))"
+
+####################################################################
+## oom_killer ######################################################
+####################################################################
+
+- name: oom_killer
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_killer: true
+ state: started
+ register: oom_killer_1
+
+- name: oom_killer (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_killer: true
+ state: started
+ register: oom_killer_2
+
+- name: oom_killer (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_killer: false
+ state: started
+ force_kill: true
+ register: oom_killer_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - oom_killer_1 is changed
+ - oom_killer_2 is not changed
+ - oom_killer_3 is changed
+ when: "'Docker warning: Your kernel does not support OomKillDisable. OomKillDisable discarded.' not in (oom_killer_1.warnings | default([]))"
+
+####################################################################
+## oom_score_adj ###################################################
+####################################################################
+
+- name: oom_score_adj
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_score_adj: 5
+ state: started
+ register: oom_score_adj_1
+
+- name: oom_score_adj (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_score_adj: 5
+ state: started
+ register: oom_score_adj_2
+
+- name: oom_score_adj (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ oom_score_adj: 7
+ state: started
+ force_kill: true
+ register: oom_score_adj_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - oom_score_adj_1 is changed
+ - oom_score_adj_2 is not changed
+ - oom_score_adj_3 is changed
+ when: "'Docker warning: Your kernel does not support OomScoreAdj. OomScoreAdj discarded.' not in (oom_score_adj_1.warnings | default([]))"
+
+####################################################################
+## output_logs #####################################################
+####################################################################
+
+# TODO: - output_logs
+
+####################################################################
+## paused ##########################################################
+####################################################################
+
+- name: paused
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: "/bin/sh -c 'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ paused: true
+ force_kill: true
+ register: paused_1
+
+- name: inspect paused
+ command: "docker inspect -f {% raw %}'{{.State.Status}} {{.State.Paused}}'{% endraw %} {{ cname }}"
+ register: paused_2
+
+- name: paused (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: "/bin/sh -c 'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ paused: true
+ force_kill: true
+ register: paused_3
+
+- name: paused (continue)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: "/bin/sh -c 'sleep 10m'"
+ name: "{{ cname }}"
+ state: started
+ paused: false
+ force_kill: true
+ register: paused_4
+
+- name: inspect paused
+ command: "docker inspect -f {% raw %}'{{.State.Status}} {{.State.Paused}}'{% endraw %} {{ cname }}"
+ register: paused_5
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - paused_1 is changed
+ - 'paused_2.stdout == "paused true"'
+ - paused_3 is not changed
+ - paused_4 is changed
+ - 'paused_5.stdout == "running false"'
+
+####################################################################
+## pid_mode ########################################################
+####################################################################
+
+- name: start helpers
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname_h1 }}"
+ state: started
+ register: pid_mode_helper
+
+- name: pid_mode
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pid_mode: "container:{{ pid_mode_helper.container.Id }}"
+ register: pid_mode_1
+
+- name: pid_mode (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pid_mode: "container:{{ cname_h1 }}"
+ register: pid_mode_2
+
+- name: pid_mode (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pid_mode: host
+ force_kill: true
+ register: pid_mode_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: true
+ loop:
+ - "{{ cname }}"
+ - "{{ cname_h1 }}"
+ loop_control:
+ loop_var: container_name
+ diff: false
+
+- assert:
+ that:
+ - pid_mode_1 is changed
+ - pid_mode_2 is not changed
+ - pid_mode_3 is changed
+
+####################################################################
+## pids_limit ######################################################
+####################################################################
+
+- name: pids_limit
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pids_limit: 10
+ register: pids_limit_1
+
+- name: pids_limit (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pids_limit: 10
+ register: pids_limit_2
+
+- name: pids_limit (changed)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ pids_limit: 20
+ force_kill: true
+ register: pids_limit_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - pids_limit_1 is changed
+ - pids_limit_2 is not changed
+ - pids_limit_3 is changed
+
+####################################################################
+## platform ########################################################
+####################################################################
+
+- name: Remove hello-world image
+ docker_image:
+ name: hello-world:latest
+ state: absent
+
+- name: platform
+ docker_container:
+ image: hello-world:latest
+ name: "{{ cname }}"
+ state: present
+ pull: true
+ platform: linux/amd64
+ debug: true
+ register: platform_1
+ ignore_errors: true
+
+- name: platform (idempotency)
+ docker_container:
+ image: hello-world:latest
+ name: "{{ cname }}"
+ state: present
+ # The container always reports 'linux' as platform instead of 'linux/amd64'...
+ platform: linux
+ debug: true
+ register: platform_2
+ ignore_errors: true
+
+- name: platform (changed)
+ docker_container:
+ image: hello-world:latest
+ name: "{{ cname }}"
+ state: present
+ pull: true
+ platform: linux/386
+ force_kill: true
+ debug: true
+ comparisons:
+ # Do not restart because of the changed image ID
+ image: ignore
+ register: platform_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - platform_1 is changed
+ - platform_2 is not changed and platform_2 is not failed
+ - platform_3 is changed
+ when: docker_api_version is version('1.41', '>=')
+- assert:
+ that:
+ - platform_1 is failed
+ - |
+ ('API version is ' ~ docker_api_version ~ '.') in platform_1.msg and 'Minimum version required is 1.41 ' in platform_1.msg
+ when: docker_api_version is version('1.41', '<')
+
+####################################################################
+## privileged ######################################################
+####################################################################
+
+- name: privileged
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ privileged: true
+ state: started
+ register: privileged_1
+
+- name: privileged (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ privileged: true
+ state: started
+ register: privileged_2
+
+- name: privileged (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ privileged: false
+ state: started
+ force_kill: true
+ register: privileged_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - privileged_1 is changed
+ - privileged_2 is not changed
+ - privileged_3 is changed
+
+####################################################################
+## published_ports and default_host_ip #############################
+####################################################################
+
+- name: published_ports
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9001'
+ - '9002'
+ register: published_ports_1
+
+- name: published_ports (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9002'
+ - '9001'
+ register: published_ports_2
+
+- name: published_ports (less published_ports)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9002'
+ register: published_ports_3
+
+- name: published_ports (more published_ports)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9002'
+ - '9003'
+ force_kill: true
+ register: published_ports_4
+
+- name: published_ports (ports with IP addresses)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '127.0.0.1:9002:9002/tcp'
+ - '[::1]:9003:9003/tcp'
+ - '[fe80::1%test]:90:90/tcp'
+ force_kill: true
+ register: published_ports_5
+
+- name: published_ports (ports with IP addresses, idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '127.0.0.1:9002:9002/tcp'
+ - '[::1]:9003:9003/tcp'
+ - '[fe80::1%test]:90:90/tcp'
+ register: published_ports_6
+
+- name: published_ports (no published ports)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports: []
+ comparisons:
+ published_ports: strict
+ force_kill: true
+ register: published_ports_7
+
+- name: published_ports (default_host_ip not set)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9001'
+ - '9002'
+ force_kill: true
+ register: published_ports_8
+
+- name: published_ports (default_host_ip set to empty string)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9002'
+ - '9001'
+ default_host_ip: ''
+ force_kill: true
+ register: published_ports_9
+
+- name: published_ports (default_host_ip set to empty string, idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9002'
+ - '9001'
+ default_host_ip: ''
+ register: published_ports_10
+
+- name: published_ports (default_host_ip unset)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - '9002'
+ - '9001'
+ force_kill: true
+ register: published_ports_11
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - published_ports_1 is changed
+ - published_ports_2 is not changed
+ - published_ports_3 is not changed
+ - published_ports_4 is changed
+ - published_ports_5 is changed
+ - published_ports_6 is not changed
+ - published_ports_7 is changed
+ - published_ports_8 is changed
+ - published_ports_9 is changed
+ - published_ports_10 is not changed
+ - published_ports_11 is changed
+
+####################################################################
+## pull ############################################################
+####################################################################
+
+# TODO: - pull
+
+####################################################################
+## read_only #######################################################
+####################################################################
+
+- name: read_only
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ read_only: true
+ state: started
+ register: read_only_1
+
+- name: read_only (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ read_only: true
+ state: started
+ register: read_only_2
+
+- name: read_only (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ read_only: false
+ state: started
+ force_kill: true
+ register: read_only_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - read_only_1 is changed
+ - read_only_2 is not changed
+ - read_only_3 is changed
+
+####################################################################
+## restart_policy ##################################################
+####################################################################
+
+- name: restart_policy
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: always
+ state: started
+ register: restart_policy_1
+
+- name: restart_policy (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: always
+ state: started
+ register: restart_policy_2
+
+- name: restart_policy (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: unless-stopped
+ state: started
+ force_kill: true
+ register: restart_policy_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - restart_policy_1 is changed
+ - restart_policy_2 is not changed
+ - restart_policy_3 is changed
+
+####################################################################
+## restart_retries #################################################
+####################################################################
+
+- name: restart_retries
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: on-failure
+ restart_retries: 5
+ state: started
+ register: restart_retries_1
+
+- name: restart_retries (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: on-failure
+ restart_retries: 5
+ state: started
+ register: restart_retries_2
+
+- name: restart_retries (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart_policy: on-failure
+ restart_retries: 2
+ state: started
+ force_kill: true
+ register: restart_retries_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - restart_retries_1 is changed
+ - restart_retries_2 is not changed
+ - restart_retries_3 is changed
+
+####################################################################
+## runtime #########################################################
+####################################################################
+
+- name: runtime
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ runtime: runc
+ state: started
+ register: runtime_1
+
+- name: runtime (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ runtime: runc
+ state: started
+ register: runtime_2
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - runtime_1 is changed
+ - runtime_2 is not changed
+
+####################################################################
+## security_opts ###################################################
+####################################################################
+
+# In case some of the options stop working, here are some more
+# options which *currently* work with all integration test targets:
+# no-new-privileges
+# label:disable
+# label=disable
+# label:level:s0:c100,c200
+# label=level:s0:c100,c200
+# label:type:svirt_apache_t
+# label=type:svirt_apache_t
+# label:user:root
+# label=user:root
+# seccomp:unconfined
+# seccomp=unconfined
+# apparmor:docker-default
+# apparmor=docker-default
+
+- name: security_opts
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ security_opts:
+ - "label:level:s0:c100,c200"
+ - "no-new-privileges"
+ register: security_opts_1
+
+- name: security_opts (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ security_opts:
+ - "no-new-privileges"
+ - "label:level:s0:c100,c200"
+ register: security_opts_2
+
+- name: security_opts (less security options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ security_opts:
+ - "no-new-privileges"
+ register: security_opts_3
+
+- name: security_opts (more security options)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ security_opts:
+ - "label:disable"
+ - "no-new-privileges"
+ force_kill: true
+ register: security_opts_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - security_opts_1 is changed
+ - security_opts_2 is not changed
+ - security_opts_3 is not changed
+ - security_opts_4 is changed
+
+####################################################################
+## shm_size ########################################################
+####################################################################
+
+- name: shm_size
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ shm_size: 96M
+ state: started
+ register: shm_size_1
+
+- name: shm_size (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ shm_size: 96M
+ state: started
+ register: shm_size_2
+
+- name: shm_size (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ shm_size: 75M
+ state: started
+ force_kill: true
+ register: shm_size_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - shm_size_1 is changed
+ - shm_size_2 is not changed
+ - shm_size_3 is changed
+
+####################################################################
+## stop_signal #####################################################
+####################################################################
+
+- name: stop_signal
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_signal: "30"
+ state: started
+ register: stop_signal_1
+
+- name: stop_signal (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_signal: "30"
+ state: started
+ register: stop_signal_2
+
+- name: stop_signal (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_signal: "9"
+ state: started
+ force_kill: true
+ register: stop_signal_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - stop_signal_1 is changed
+ - stop_signal_2 is not changed
+ - stop_signal_3 is changed
+
+####################################################################
+## stop_timeout ####################################################
+####################################################################
+
+- name: stop_timeout
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_timeout: 2
+ state: started
+ register: stop_timeout_1
+
+- name: stop_timeout (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_timeout: 2
+ state: started
+ register: stop_timeout_2
+
+- name: stop_timeout (no change)
+ # stop_timeout changes are ignored by default
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ stop_timeout: 1
+ state: started
+ register: stop_timeout_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - stop_timeout_1 is changed
+ - stop_timeout_2 is not changed
+ - stop_timeout_3 is not changed
+
+####################################################################
+## storage_opts ####################################################
+####################################################################
+
+- name: storage_opts
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ storage_opts:
+ size: 12m
+ state: started
+ register: storage_opts_1
+ ignore_errors: true
+
+- name: storage_opts (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ storage_opts:
+ size: 12m
+ state: started
+ register: storage_opts_2
+ ignore_errors: true
+
+- name: storage_opts (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ storage_opts:
+ size: 24m
+ state: started
+ force_kill: true
+ register: storage_opts_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - storage_opts_1 is changed
+ - storage_opts_2 is not failed and storage_opts_2 is not changed
+ - storage_opts_3 is not failed and storage_opts_3 is changed
+ when: storage_opts_1 is not failed
+
+- assert:
+ that:
+ - "'is supported only for' in storage_opts_1.msg"
+ - storage_opts_2 is failed
+ - storage_opts_3 is failed
+ when: storage_opts_1 is failed
+
+####################################################################
+## sysctls #########################################################
+####################################################################
+
+# In case some of the options stop working, here are some more
+# options which *currently* work with all integration test targets:
+# net.ipv4.conf.default.log_martians: 1
+# net.ipv4.conf.default.secure_redirects: 0
+# net.ipv4.conf.default.send_redirects: 0
+# net.ipv4.conf.all.log_martians: 1
+# net.ipv4.conf.all.accept_redirects: 0
+# net.ipv4.conf.all.secure_redirects: 0
+# net.ipv4.conf.all.send_redirects: 0
+
+- name: sysctls
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ sysctls:
+ net.ipv4.icmp_echo_ignore_all: 1
+ net.ipv4.ip_forward: 1
+ register: sysctls_1
+
+- name: sysctls (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ sysctls:
+ net.ipv4.ip_forward: 1
+ net.ipv4.icmp_echo_ignore_all: 1
+ register: sysctls_2
+
+- name: sysctls (less sysctls)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ sysctls:
+ net.ipv4.icmp_echo_ignore_all: 1
+ register: sysctls_3
+
+- name: sysctls (more sysctls)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ sysctls:
+ net.ipv4.icmp_echo_ignore_all: 1
+ net.ipv6.conf.default.accept_redirects: 0
+ force_kill: true
+ register: sysctls_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - sysctls_1 is changed
+ - sysctls_2 is not changed
+ - sysctls_3 is not changed
+ - sysctls_4 is changed
+
+####################################################################
+## tmpfs ###########################################################
+####################################################################
+
+- name: tmpfs
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ tmpfs:
+ - "/test1:rw,noexec,nosuid,size=65536k"
+ - "/test2:rw,noexec,nosuid,size=65536k"
+ register: tmpfs_1
+
+- name: tmpfs (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ tmpfs:
+ - "/test2:rw,noexec,nosuid,size=65536k"
+ - "/test1:rw,noexec,nosuid,size=65536k"
+ register: tmpfs_2
+
+- name: tmpfs (less tmpfs)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ tmpfs:
+ - "/test1:rw,noexec,nosuid,size=65536k"
+ register: tmpfs_3
+
+- name: tmpfs (more tmpfs)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ tmpfs:
+ - "/test1:rw,noexec,nosuid,size=65536k"
+ - "/test3:rw,noexec,nosuid,size=65536k"
+ force_kill: true
+ register: tmpfs_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - tmpfs_1 is changed
+ - tmpfs_2 is not changed
+ - tmpfs_3 is not changed
+ - tmpfs_4 is changed
+
+####################################################################
+## tty #############################################################
+####################################################################
+
+- name: tty
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ tty: true
+ state: started
+ register: tty_1
+ ignore_errors: true
+
+- name: tty (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ tty: true
+ state: started
+ register: tty_2
+ ignore_errors: true
+
+- name: tty (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ tty: false
+ state: started
+ force_kill: true
+ register: tty_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - tty_1 is changed
+ - tty_2 is not changed and tty_2 is not failed
+ - tty_3 is changed
+ when: tty_1 is not failed
+
+- assert:
+ that:
+ - "'error during container init: open /dev/pts/' in tty_1.msg"
+ - "': operation not permitted: ' in tty_1.msg"
+ when: tty_1 is failed
+
+####################################################################
+## ulimits #########################################################
+####################################################################
+
+- name: ulimits
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ulimits:
+ - "nofile:1234:1234"
+ - "nproc:3:6"
+ register: ulimits_1
+
+- name: ulimits (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ulimits:
+ - "nproc:3:6"
+ - "nofile:1234:1234"
+ register: ulimits_2
+
+- name: ulimits (less ulimits)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ulimits:
+ - "nofile:1234:1234"
+ register: ulimits_3
+
+- name: ulimits (more ulimits)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ ulimits:
+ - "nofile:1234:1234"
+ - "sigpending:100:200"
+ force_kill: true
+ register: ulimits_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - ulimits_1 is changed
+ - ulimits_2 is not changed
+ - ulimits_3 is not changed
+ - ulimits_4 is changed
+
+####################################################################
+## user ############################################################
+####################################################################
+
+- name: user
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ user: nobody
+ state: started
+ register: user_1
+
+- name: user (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ user: nobody
+ state: started
+ register: user_2
+
+- name: user (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ user: root
+ state: started
+ force_kill: true
+ register: user_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - user_1 is changed
+ - user_2 is not changed
+ - user_3 is changed
+
+####################################################################
+## userns_mode #####################################################
+####################################################################
+
+- name: userns_mode
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ userns_mode: host
+ state: started
+ register: userns_mode_1
+
+- name: userns_mode (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ userns_mode: host
+ state: started
+ register: userns_mode_2
+
+- name: userns_mode (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ userns_mode: ""
+ state: started
+ force_kill: true
+ register: userns_mode_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - userns_mode_1 is changed
+ - userns_mode_2 is not changed
+ - userns_mode_3 is changed
+
+####################################################################
+## uts #############################################################
+####################################################################
+
+- name: uts
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ uts: host
+ state: started
+ register: uts_1
+
+- name: uts (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ uts: host
+ state: started
+ register: uts_2
+
+- name: uts (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ uts: ""
+ state: started
+ force_kill: true
+ register: uts_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - uts_1 is changed
+ - uts_2 is not changed
+ - uts_3 is changed
+
+####################################################################
+## working_dir #####################################################
+####################################################################
+
+- name: working_dir
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ working_dir: /tmp
+ state: started
+ register: working_dir_1
+
+- name: working_dir (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ working_dir: /tmp
+ state: started
+ register: working_dir_2
+
+- name: working_dir (change)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ working_dir: /
+ state: started
+ force_kill: true
+ register: working_dir_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - working_dir_1 is changed
+ - working_dir_2 is not changed
+ - working_dir_3 is changed
+
+####################################################################
+####################################################################
+####################################################################
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/ports.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/ports.yml
new file mode 100644
index 00000000..ced86851
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/ports.yml
@@ -0,0 +1,326 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-options' }}"
+ cname2: "{{ cname_prefix ~ '-options-h1' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname, cname2] }}"
+
+####################################################################
+## published_ports: error handling #################################
+####################################################################
+
+- name: published_ports -- non-closing square bracket
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - "[::1:2000:3000"
+ register: published_ports_1
+ ignore_errors: true
+
+- name: published_ports -- forgot square brackets for IPv6
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - "::1:2000:3000"
+ register: published_ports_2
+ ignore_errors: true
+
+- name: published_ports -- disallow hostnames
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - "foo:2000:3000"
+ register: published_ports_3
+ ignore_errors: true
+
+- assert:
+ that:
+ - published_ports_1 is failed
+ - published_ports_1.msg == 'Cannot find closing "]" in input "[::1:2000:3000" for opening "[" at index 1!'
+ - published_ports_2 is failed
+ - published_ports_2.msg == 'Invalid port description "::1:2000:3000" - expected 1 to 3 colon-separated parts, but got 5. Maybe you forgot to use square brackets ([...]) around an IPv6 address?'
+ - published_ports_3 is failed
+ - "published_ports_3.msg == 'Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. Use the dig lookup to resolve hostnames. (Found hostname: foo)'"
+
+####################################################################
+## published_ports: port range #####################################
+####################################################################
+
+- name: published_ports -- port range
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9010-9050"
+ published_ports:
+ - "9001:9001"
+ - "9010-9050:9010-9050"
+ force_kill: true
+ register: published_ports_1
+
+- name: published_ports -- port range (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9010-9050"
+ published_ports:
+ - "9001:9001"
+ - "9010-9050:9010-9050"
+ force_kill: true
+ register: published_ports_2
+
+- name: published_ports -- port range (different range)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ exposed_ports:
+ - "9001"
+ - "9010-9050"
+ published_ports:
+ - "9001:9001"
+ - "9020-9060:9020-9060"
+ force_kill: true
+ register: published_ports_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - published_ports_1 is changed
+ - published_ports_2 is not changed
+ - published_ports_3 is changed
+
+####################################################################
+## published_ports: one-element container port range ###############
+####################################################################
+
+- name: published_ports -- one-element container port range
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ item }}"
+ state: started
+ published_ports:
+ - "9010-9050:9010"
+ force_kill: true
+ loop:
+ - '{{ cname }}'
+ - '{{ cname2 }}'
+ register: published_ports_1
+
+- name: published_ports -- one-element container port range (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ item }}"
+ state: started
+ published_ports:
+ - "9010-9050:9010"
+ force_kill: true
+ loop:
+ - '{{ cname }}'
+ - '{{ cname2 }}'
+ register: published_ports_2
+
+- name: published_ports -- one-element container port range (different range)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ item }}"
+ state: started
+ published_ports:
+ - "9010-9051:9010"
+ force_kill: true
+ loop:
+ - '{{ cname }}'
+ - '{{ cname2 }}'
+ register: published_ports_3
+
+- name: cleanup
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: true
+ loop:
+ - '{{ cname }}'
+ - '{{ cname2 }}'
+ diff: false
+
+- assert:
+ that:
+ - published_ports_1 is changed
+ - published_ports_2 is not changed
+ - published_ports_3 is changed
+
+####################################################################
+## published_ports: IPv6 addresses #################################
+####################################################################
+
+- name: published_ports -- IPv6
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - "[::1]:9001:9001"
+ force_kill: true
+ register: published_ports_1
+
+- name: published_ports -- IPv6 (idempotency)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - "[::1]:9001:9001"
+ force_kill: true
+ register: published_ports_2
+
+- name: published_ports -- IPv6 (different IP)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - "127.0.0.1:9001:9001"
+ force_kill: true
+ register: published_ports_3
+
+- name: published_ports -- IPv6 (hostname)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ published_ports:
+ - "localhost:9001:9001"
+ force_kill: true
+ register: published_ports_4
+ ignore_errors: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - published_ports_1 is changed
+ - published_ports_2 is not changed
+ - published_ports_3 is changed
+ - published_ports_4 is failed
+
+####################################################################
+## publish_all_ports ###############################################
+####################################################################
+
+- set_fact:
+ publish_all_ports_test_cases:
+ - test_name: no_options
+ changed: true
+ - test_name: null_to_true
+ publish_all_ports_value: true
+ changed: true
+ - test_name: true_idempotency
+ publish_all_ports_value: true
+ changed: false
+ - test_name: true_to_null
+ changed: false
+ - test_name: null_to_true_2
+ publish_all_ports_value: true
+ changed: false
+ - test_name: true_to_false
+ publish_all_ports_value: false
+ changed: true
+ - test_name: false_idempotency
+ publish_all_ports_value: false
+ changed: false
+ - test_name: false_to_null
+ changed: false
+ - test_name: null_with_published_ports
+ published_ports_value: &ports
+ - "9001:9001"
+ - "9010-9050:9010-9050"
+ changed: true
+ - test_name: null_to_true_with_published_ports
+ publish_all_ports_value: true
+ published_ports_value: *ports
+ changed: true
+ - test_name: true_idempotency_with_published_ports
+ publish_all_ports_value: true
+ published_ports_value: *ports
+ changed: false
+ - test_name: true_to_null_with_published_ports
+ published_ports_value: *ports
+ changed: false
+ - test_name: null_to_true_2_with_published_ports
+ publish_all_ports_value: true
+ published_ports_value: *ports
+ changed: false
+ - test_name: true_to_false_with_published_ports
+ publish_all_ports_value: false
+ published_ports_value: *ports
+ changed: true
+ - test_name: false_idempotency_with_published_ports
+ publish_all_ports_value: false
+ published_ports_value: *ports
+ changed: false
+ - test_name: false_to_null_with_published_ports
+ published_ports_value: *ports
+ changed: false
+
+- name: publish_all_ports ({{ test_case.test_name }})
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ publish_all_ports: "{{ test_case.publish_all_ports_value | default(omit) }}"
+ published_ports: "{{ test_case.published_ports_value | default(omit) }}"
+ force_kill: true
+ register: publish_all_ports
+ loop_control:
+ loop_var: test_case
+ loop: "{{ publish_all_ports_test_cases }}"
+
+- assert:
+ that:
+ - publish_all_ports.results[index].changed == test_case.changed
+ loop: "{{ publish_all_ports_test_cases }}"
+ loop_control:
+ index_var: index
+ loop_var: test_case
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml
new file mode 100644
index 00000000..928463ae
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/regression-45700-dont-parse-on-absent.yml
@@ -0,0 +1,38 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Regression test for https://github.com/ansible/ansible/pull/45700
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-45700' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+- name: Start container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+
+- name: Stop container with a lot of invalid options
+ docker_container:
+ name: "{{ cname }}"
+ force_kill: true
+ # Some options with "invalid" values, which would
+ # have to be parsed. The values are "invalid" because
+ # the containers and networks listed here do not exist.
+ # This can happen because the networks are removed
+ # before the container is stopped (see
+ # https://github.com/ansible/ansible/issues/45486).
+ networks:
+ - name: "nonexistant-network-{{ (2**32) | random }}"
+ published_ports:
+ - '1:2'
+ - '3'
+ links:
+ - "nonexistant-container-{{ (2**32) | random }}:test"
+ state: absent
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/start-stop.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/start-stop.yml
new file mode 100644
index 00000000..97ac38a5
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/start-stop.yml
@@ -0,0 +1,459 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-hi' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+####################################################################
+## Creation ########################################################
+####################################################################
+
+- name: Create container (check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ check_mode: true
+ register: create_1
+
+- name: Create container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ register: create_2
+
+- name: Create container (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ register: create_3
+
+- name: Create container (idempotent check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ check_mode: true
+ register: create_4
+
+- assert:
+ that:
+ - create_1 is changed
+ - create_2 is changed
+ - create_3 is not changed
+ - create_4 is not changed
+
+####################################################################
+## Starting (after creation) #######################################
+####################################################################
+
+- name: Start container (check)
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ check_mode: true
+ register: start_1
+
+- name: Start container
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ register: start_2
+
+- name: Start container (idempotent)
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ register: start_3
+
+- name: Start container (idempotent check)
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+ check_mode: true
+ register: start_4
+
+- assert:
+ that:
+ - start_1 is changed
+ - start_2 is changed
+ - start_3 is not changed
+ - start_4 is not changed
+
+####################################################################
+## Present check for running container #############################
+####################################################################
+
+- name: Present check for running container (check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ check_mode: true
+ register: present_check_1
+
+- name: Present check for running container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ register: present_check_2
+
+- assert:
+ that:
+ - present_check_1 is not changed
+ - present_check_2 is not changed
+
+####################################################################
+## Starting (from scratch) #########################################
+####################################################################
+
+- name: Remove container (setup for starting from scratch)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+
+- name: Start container from scratch (check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ stop_timeout: 1
+ name: "{{ cname }}"
+ state: started
+ check_mode: true
+ register: start_scratch_1
+
+- name: Start container from scratch
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ stop_timeout: 1
+ name: "{{ cname }}"
+ state: started
+ register: start_scratch_2
+
+- name: Start container from scratch (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ stop_timeout: 1
+ name: "{{ cname }}"
+ state: started
+ register: start_scratch_3
+
+- name: Start container from scratch (idempotent check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ stop_timeout: 1
+ name: "{{ cname }}"
+ state: started
+ check_mode: true
+ register: start_scratch_4
+
+- assert:
+ that:
+ - start_scratch_1 is changed
+ - start_scratch_2 is changed
+ - start_scratch_3 is not changed
+ - start_scratch_4 is not changed
+
+####################################################################
+## Recreating ######################################################
+####################################################################
+
+- name: Recreating container (created)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: present
+ force_kill: true
+ register: recreate_1
+
+- name: Recreating container (created, recreate, check mode)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ recreate: true
+ state: present
+ force_kill: true
+ register: recreate_2
+ check_mode: true
+
+- name: Recreating container (created, recreate)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ recreate: true
+ state: present
+ force_kill: true
+ register: recreate_3
+
+- name: Recreating container (started)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ force_kill: true
+ register: recreate_4
+
+- name: Recreating container (started, recreate, check mode)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ recreate: true
+ removal_wait_timeout: 10
+ state: started
+ force_kill: true
+ register: recreate_5
+ check_mode: true
+
+- name: Recreating container (started, recreate)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ recreate: true
+ removal_wait_timeout: 10
+ state: started
+ force_kill: true
+ register: recreate_6
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- debug: var=recreate_1
+- debug: var=recreate_3
+- debug: var=recreate_4
+- debug: var=recreate_6
+
+- assert:
+ that:
+ - recreate_2 is changed
+ - recreate_3 is changed
+ - recreate_4 is changed
+ - recreate_5 is changed
+ - recreate_6 is changed
+ - recreate_1.container.Id == recreate_2.container.Id
+ - recreate_1.container.Id != recreate_3.container.Id
+ - recreate_3.container.Id == recreate_4.container.Id
+ - recreate_4.container.Id == recreate_5.container.Id
+ - recreate_4.container.Id != recreate_6.container.Id
+
+####################################################################
+## Restarting ######################################################
+####################################################################
+
+- name: Restarting
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ stop_timeout: 1
+ volumes:
+ - /tmp/tmp
+ register: restart_1
+
+- name: Restarting (restart, check mode)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart: true
+ state: started
+ stop_timeout: 1
+ force_kill: true
+ register: restart_2
+ check_mode: true
+
+- name: Restarting (restart)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ restart: true
+ state: started
+ stop_timeout: 1
+ force_kill: true
+ register: restart_3
+
+- name: Restarting (verify volumes)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ stop_timeout: 1
+ volumes:
+ - /tmp/tmp
+ register: restart_4
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- assert:
+ that:
+ - restart_1 is changed
+ - restart_2 is changed
+ - restart_3 is changed
+ - restart_1.container.Id == restart_3.container.Id
+ - restart_4 is not changed
+
+####################################################################
+## Stopping ########################################################
+####################################################################
+
+- name: Stop container (check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: stopped
+ stop_timeout: 1
+ check_mode: true
+ register: stop_1
+
+- name: Stop container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: stopped
+ stop_timeout: 1
+ register: stop_2
+
+- name: Stop container (idempotent)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: stopped
+ stop_timeout: 1
+ register: stop_3
+
+- name: Stop container (idempotent check)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname }}"
+ state: stopped
+ stop_timeout: 1
+ check_mode: true
+ register: stop_4
+
+- assert:
+ that:
+ - stop_1 is changed
+ - stop_2 is changed
+ - stop_3 is not changed
+ - stop_4 is not changed
+
+####################################################################
+## Removing ########################################################
+####################################################################
+
+- name: Remove container (check)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ check_mode: true
+ register: remove_1
+
+- name: Remove container
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: remove_2
+
+- name: Remove container (idempotent)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ register: remove_3
+
+- name: Remove container (idempotent check)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ check_mode: true
+ register: remove_4
+
+- assert:
+ that:
+ - remove_1 is changed
+ - remove_2 is changed
+ - remove_3 is not changed
+ - remove_4 is not changed
+
+####################################################################
+## Removing (from running) #########################################
+####################################################################
+
+- name: Start container (setup for removing from running)
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+
+- name: Remove container from running (check)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ check_mode: true
+ register: remove_from_running_1
+
+- name: Remove container from running
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ register: remove_from_running_2
+
+- name: Remove container from running (idempotent)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ register: remove_from_running_3
+
+- name: Remove container from running (idempotent check)
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ check_mode: true
+ register: remove_from_running_4
+
+- assert:
+ that:
+ - remove_from_running_1 is changed
+ - remove_from_running_2 is changed
+ - remove_from_running_3 is not changed
+ - remove_from_running_4 is not changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/update.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/update.yml
new file mode 100644
index 00000000..a180e0f5
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container/tasks/tests/update.yml
@@ -0,0 +1,212 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-update' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+# We do not test cpuset_cpus and cpuset_mems since changing them fails if the system does
+# not have 'enough' CPUs. We do not test kernel_memory since it is deprecated and fails.
+
+- set_fact:
+ has_blkio_weight: true
+
+- name: Create container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ blkio_weight: "{{ 123 if has_blkio_weight else omit }}"
+ cpu_period: 90000
+ cpu_quota: 150000
+ cpu_shares: 900
+ memory: 64M
+ memory_reservation: 64M
+ memory_swap: 64M
+ restart_policy: on-failure
+ restart_retries: 5
+ register: create
+ ignore_errors: true
+
+- when: create is failed
+ block:
+ - name: Make sure container is not there
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+
+ - when: "'setting cgroup config for procHooks process caused: failed to write' in create.msg and 'io.bfq.weight' in create.msg"
+ set_fact:
+ has_blkio_weight: false
+
+ - name: Create container again
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ blkio_weight: "{{ 123 if has_blkio_weight else omit }}"
+ cpu_period: 90000
+ cpu_quota: 150000
+ cpu_shares: 900
+ memory: 64M
+ memory_reservation: 64M
+ memory_swap: 64M
+ restart_policy: on-failure
+ restart_retries: 5
+ register: create_2
+
+ - when: "'setting cgroup config for procHooks process caused: failed to write' in create.msg and 'io.bfq.weight' in create.msg"
+ set_fact:
+ create: "{{ create_2 }}"
+
+- name: Update values
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ blkio_weight: "{{ 234 if has_blkio_weight else omit }}"
+ cpu_period: 50000
+ cpu_quota: 50000
+ cpu_shares: 1100
+ memory: 48M
+ memory_reservation: 48M
+ memory_swap: unlimited
+ restart_policy: on-failure # only on-failure can have restart_retries, so don't change it here
+ restart_retries: 2
+ register: update
+ diff: true
+
+- name: Update values again
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ state: started
+ blkio_weight: "{{ 135 if has_blkio_weight else omit }}"
+ cpu_period: 30000
+ cpu_quota: 40000
+ cpu_shares: 1000
+ memory: 32M
+ memory_reservation: 30M
+ memory_swap: 128M
+ restart_policy: always
+ restart_retries: 0
+ register: update2
+ diff: true
+
+- name: Recreate container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 20m"' # this will force re-creation
+ name: "{{ cname }}"
+ state: started
+ blkio_weight: "{{ 234 if has_blkio_weight else omit }}"
+ cpu_period: 50000
+ cpu_quota: 50000
+ cpu_shares: 1100
+ memory: 48M
+ memory_reservation: 48M
+ memory_swap: unlimited
+ restart_policy: on-failure
+ restart_retries: 2
+ force_kill: true
+ register: recreate
+ diff: true
+
+- name: cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+ diff: false
+
+- name: Check general things
+ assert:
+ that:
+ - create is changed
+ - update is changed
+ - update2 is changed
+ - recreate is changed
+
+ # Make sure the container was *not* recreated when it should not be
+ - create.container.Id == update.container.Id
+ - create.container.Id == update2.container.Id
+
+ # Make sure that the container was recreated when it should be
+ - create.container.Id != recreate.container.Id
+
+- name: Check diff for first update
+ assert:
+ that:
+      # blkio_weight sometimes cannot be set, in which case we end up with 0 instead of the value we had
+ - not has_blkio_weight or update.diff.before.blkio_weight == 123 or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.' in (create.warnings | default([]))
+ - not has_blkio_weight or update.diff.after.blkio_weight == 234
+ - update.diff.before.cpu_period == 90000
+ - update.diff.after.cpu_period == 50000
+ - update.diff.before.cpu_quota == 150000
+ - update.diff.after.cpu_quota == 50000
+ - update.diff.before.cpu_shares == 900
+ - update.diff.after.cpu_shares == 1100
+ - update.diff.before.memory == 67108864
+ - update.diff.after.memory == 50331648
+ - update.diff.before.memory_reservation == 67108864
+ - update.diff.after.memory_reservation == 50331648
+ - (update.diff.before.memory_swap | default(0)) == 67108864 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([]))
+ - (update.diff.after.memory_swap | default(0)) == -1 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([]))
+ - "'restart_policy' not in update.diff.before"
+ - update.diff.before.restart_retries == 5
+ - update.diff.after.restart_retries == 2
+
+- name: Check diff for second update
+ assert:
+ that:
+ - not has_blkio_weight or update2.diff.before.blkio_weight == 234 or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.' in (create.warnings | default([]))
+ - not has_blkio_weight or update2.diff.after.blkio_weight == 135
+ - update2.diff.before.cpu_period == 50000
+ - update2.diff.after.cpu_period == 30000
+ - update2.diff.before.cpu_quota == 50000
+ - update2.diff.after.cpu_quota == 40000
+ - update2.diff.before.cpu_shares == 1100
+ - update2.diff.after.cpu_shares == 1000
+ - update2.diff.before.memory == 50331648
+ - update2.diff.after.memory == 33554432
+ - update2.diff.before.memory_reservation == 50331648
+ - update2.diff.after.memory_reservation == 31457280
+ - (update2.diff.before.memory_swap | default(0)) == -1 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([]))
+ - (update2.diff.after.memory_swap | default(0)) == 134217728 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([]))
+ - update2.diff.before.restart_policy == 'on-failure'
+ - update2.diff.after.restart_policy == 'always'
+ - update2.diff.before.restart_retries == 2
+ - update2.diff.after.restart_retries == 0
+
+- name: Check diff for recreation
+ assert:
+ that:
+ - not has_blkio_weight or recreate.diff.before.blkio_weight == 135 or 'Docker warning: Your kernel does not support Block I/O weight or the cgroup is not mounted. Weight discarded.' in (create.warnings | default([]))
+ - not has_blkio_weight or recreate.diff.after.blkio_weight == 234
+ - recreate.diff.before.cpu_period == 30000
+ - recreate.diff.after.cpu_period == 50000
+ - recreate.diff.before.cpu_quota == 40000
+ - recreate.diff.after.cpu_quota == 50000
+ - recreate.diff.before.cpu_shares == 1000
+ - recreate.diff.after.cpu_shares == 1100
+ - recreate.diff.before.memory == 33554432
+ - recreate.diff.after.memory == 50331648
+ - recreate.diff.before.memory_reservation == 31457280
+ - recreate.diff.after.memory_reservation == 50331648
+ - (recreate.diff.before.memory_swap | default(0)) == 134217728 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([]))
+ - (recreate.diff.after.memory_swap | default(0)) == -1 or 'Docker warning: Your kernel does not support swap limit capabilities or the cgroup is not mounted. Memory limited without swap.' in (create.warnings | default([]))
+ - recreate.diff.before.restart_policy == 'always'
+ - recreate.diff.after.restart_policy == 'on-failure'
+ - recreate.diff.before.restart_retries == 0
+ - recreate.diff.after.restart_retries == 2
+ - recreate.diff.before.command == ['/bin/sh', '-c', 'sleep 10m']
+ - recreate.diff.after.command == ['/bin/sh', '-c', 'sleep 20m']
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/meta/main.yml
new file mode 100644
index 00000000..2650229d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/main.yml
new file mode 100644
index 00000000..20f9a268
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/main.yml
@@ -0,0 +1,45 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Gather facts on controller
+ setup:
+ gather_subset: '!all'
+ delegate_to: localhost
+ delegate_facts: true
+ run_once: true
+
+# Create random name prefix (for containers)
+- name: Create random container name prefix
+ set_fact:
+ cname_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+ cnames: []
+
+- debug:
+ msg: "Using container name prefix {{ cname_prefix }}"
+
+# Run the tests
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure all containers are removed"
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: true
+ with_items: "{{ cnames }}"
+ diff: false
+
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old Docker API version to run all docker_container_copy_into tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/run-test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/run-test.yml
new file mode 100644
index 00000000..65853ddd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/run-test.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/tests/content.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/tests/content.yml
new file mode 100644
index 00000000..b49c05a9
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/tests/content.yml
@@ -0,0 +1,1197 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-c' }}"
+- name: Registering container name
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+# Create container
+
+- name: Create container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command:
+ - /bin/sh
+ - "-c"
+ - >-
+ mkdir /dir;
+ ln -s file /lnk;
+ ln -s lnk3 /lnk2;
+ ln -s lnk2 /lnk1;
+ sleep 10m;
+ name: "{{ cname }}"
+ state: started
+
+################################################################################################
+# Do tests
+
+- name: Copy content without mode
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ register: result
+ ignore_errors: true
+
+- name: Check results
+ assert:
+ that:
+ - result is failed
+ - |-
+ result.msg in [
+ "missing parameter(s) required by 'content': mode",
+ ]
+
+######################### Copy
+
+- name: Copy content (check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0644
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy content (check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0644
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy content (check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0644
+ register: result_2
+
+- name: Copy content (idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0644
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy content (idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0644
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy content (idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0644
+ register: result_4
+
+- name: Copy content (idempotent, check mode, base 64)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: "{{ 'Content 1\n' | b64encode }}"
+ content_is_b64: true
+ container_path: '/file'
+ mode: 0644
+ check_mode: true
+ diff: false
+ register: result_3b64
+
+- name: Copy content (idempotent, check mode, base 64, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: "{{ 'Content 1\n' | b64encode }}"
+ content_is_b64: true
+ container_path: '/file'
+ mode: 0644
+ check_mode: true
+ diff: true
+ register: result_3b64_diff
+
+- name: Copy content (idempotent, base 64)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: "{{ 'Content 1\n' | b64encode }}"
+ content_is_b64: true
+ container_path: '/file'
+ mode: 0644
+ register: result_4b64
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_5
+
+- name: Copy content (force, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0644
+ force: true
+ check_mode: true
+ diff: false
+ register: result_6
+
+- name: Copy content (force, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0644
+ force: true
+ check_mode: true
+ diff: true
+ register: result_6_diff
+
+- name: Copy content (force)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0644
+ force: true
+ register: result_7
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_8
+
+- name: Copy content (force=false, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Some other content
+ container_path: '/file'
+ mode: 0777
+ owner_id: 123
+ group_id: 321
+ force: false
+ check_mode: true
+ diff: false
+ register: result_9
+
+- name: Copy content (force=false, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Some other content
+ container_path: '/file'
+ mode: 0777
+ owner_id: 123
+ group_id: 321
+ force: false
+ check_mode: true
+ diff: true
+ register: result_9_diff
+
+- name: Copy content (force=false)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Some other content
+ container_path: '/file'
+ mode: 0777
+ owner_id: 123
+ group_id: 321
+ force: false
+ register: result_10
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_11
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == ''
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 1\n'
+ - result_1_diff.diff.after_header == 'dynamically generated'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 1\n'
+ - result_3_diff.diff.before_header == '/file'
+ - result_3_diff.diff.after == 'Content 1\n'
+ - result_3_diff.diff.after_header == 'dynamically generated'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_3b64 is not changed
+ - "'diff' not in result_3b64"
+ - result_3b64_diff.diff.before == 'Content 1\n'
+ - result_3b64_diff.diff.before_header == '/file'
+ - result_3b64_diff.diff.after == 'Content 1\n'
+ - result_3b64_diff.diff.after_header == 'dynamically generated'
+ - result_3b64 == (result_3b64_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4b64 is not changed
+ - result_5.stdout | b64decode == 'Content 1\n'
+ - result_5.stderr == '10 644 regular file 0 0 /file'
+ - result_6 is changed
+ - "'diff' not in result_6"
+ - result_6_diff.diff.before == 'Content 1\n'
+ - result_6_diff.diff.before_header == '/file'
+ - result_6_diff.diff.after == 'Content 1\n'
+ - result_6_diff.diff.after_header == 'dynamically generated'
+ - result_6 == (result_6_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_7 is changed
+ - result_8.stdout | b64decode == 'Content 1\n'
+ - result_8.stderr == '10 644 regular file 0 0 /file'
+ - result_9 is not changed
+ - "'diff' not in result_9"
+ - result_9_diff.diff.before == 'Content 1\n'
+ - result_9_diff.diff.before_header == '/file'
+ - result_9_diff.diff.after == 'Content 1\n' # note that force=false
+ - result_9_diff.diff.after_header == '/file' # note that force=false
+ - result_9 == (result_9_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_10 is not changed
+ - result_11.stdout | b64decode == 'Content 1\n'
+ - result_11.stderr == '10 644 regular file 0 0 /file'
+
+######################### Follow link - idempotence
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /lnk | base64;
+ stat -c '%s %a %F %u %g %N' /lnk > /dev/stderr;
+ chdir: /root
+ register: result_0
+
+- name: Copy content following link (check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ follow: true
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy content following link (check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ follow: true
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy content following link
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ follow: true
+ register: result_2
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /lnk | base64;
+ stat -c '%s %a %F %u %g %N' /lnk > /dev/stderr;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_3
+
+- name: Copy content following link (force, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ follow: true
+ force: true
+ check_mode: true
+ diff: false
+ register: result_4
+
+- name: Copy content following link (force, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ follow: true
+ force: true
+ check_mode: true
+ diff: true
+ register: result_4_diff
+
+- name: Copy content following link (force)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ follow: true
+ force: true
+ register: result_5
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /lnk | base64;
+ stat -c '%s %a %F %u %g %N' /lnk > /dev/stderr;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_6
+
+- name: Check results
+ assert:
+ that:
+ - result_0.stdout | b64decode == 'Content 1\n'
+ - result_0.stderr == "4 777 symbolic link 0 0 '/lnk' -> 'file'"
+ - result_1 is not changed
+ - result_1.container_path == '/file'
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == 'Content 1\n'
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 1\n'
+ - result_1_diff.diff.after_header == 'dynamically generated'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is not changed
+ - result_2.container_path == '/file'
+ - result_3.stdout | b64decode == 'Content 1\n'
+ - result_3.stderr_lines[0] == "4 777 symbolic link 0 0 '/lnk' -> 'file'"
+ - result_3.stderr_lines[1] == '10 644 regular file 0 0 /file'
+ - result_4 is changed
+ - result_4.container_path == '/file'
+ - "'diff' not in result_4"
+ - result_4_diff.diff.before == 'Content 1\n'
+ - result_4_diff.diff.before_header == '/file'
+ - result_4_diff.diff.after == 'Content 1\n'
+ - result_4_diff.diff.after_header == 'dynamically generated'
+ - result_4 == (result_4_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_5 is changed
+ - result_5.container_path == '/file'
+ - result_6.stdout | b64decode == 'Content 1\n'
+ - result_6.stderr_lines[0] == "4 777 symbolic link 0 0 '/lnk' -> 'file'"
+ - result_6.stderr_lines[1] == '10 644 regular file 0 0 /file'
+
+######################### Do not follow link - replace by file
+
+- name: Copy content not following link (check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ follow: false
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy content not following link (check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ follow: false
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy content not following link
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ follow: false
+ register: result_2
+
+- name: Copy content not following link (idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy content not following link (idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy content not following link (idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ register: result_4
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /lnk | base64;
+ stat -c '%s %a %F %u %g %N' /lnk > /dev/stderr
+ chdir: /root
+ register: result_5
+
+- name: Copy content not following link (force, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ force: true
+ check_mode: true
+ diff: false
+ register: result_6
+
+- name: Copy content not following link (force, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ force: true
+ check_mode: true
+ diff: true
+ register: result_6_diff
+
+- name: Copy content not following link (force)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/lnk'
+ mode: 0644
+ force: true
+ register: result_7
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /lnk | base64;
+ stat -c '%s %a %F %u %g %N' /lnk > /dev/stderr
+ chdir: /root
+ register: result_8
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - result_1.container_path == '/lnk'
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == '/file'
+ - result_1_diff.diff.before_header == '/lnk'
+ - result_1_diff.diff.after == 'Content 1\n'
+ - result_1_diff.diff.after_header == 'dynamically generated'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_2.container_path == '/lnk'
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 1\n'
+ - result_3_diff.diff.before_header == '/lnk'
+ - result_3_diff.diff.after == 'Content 1\n'
+ - result_3_diff.diff.after_header == 'dynamically generated'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_5.stdout | b64decode == 'Content 1\n'
+ - result_5.stderr == '10 644 regular file 0 0 /lnk'
+ - result_6 is changed
+ - result_6.container_path == '/lnk'
+ - "'diff' not in result_6"
+ - result_6_diff.diff.before == 'Content 1\n'
+ - result_6_diff.diff.before_header == '/lnk'
+ - result_6_diff.diff.after == 'Content 1\n'
+ - result_6_diff.diff.after_header == 'dynamically generated'
+ - result_6 == (result_6_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_7 is changed
+ - result_7.container_path == '/lnk'
+ - result_8.stdout | b64decode == 'Content 1\n'
+ - result_8.stderr == '10 644 regular file 0 0 /lnk'
+
+######################### Replace directory by file
+
+- name: Copy content to replace directory (check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/dir'
+ mode: 0644
+ follow: false
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy content to replace directory (check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/dir'
+ mode: 0644
+ follow: false
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy content to replace directory
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/dir'
+ mode: 0644
+ follow: false
+ register: result_2
+
+- name: Copy content to replace directory (idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/dir'
+ mode: 0644
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy content to replace directory (idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/dir'
+ mode: 0644
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy content to replace directory (idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/dir'
+ mode: 0644
+ register: result_4
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /dir | base64;
+ stat -c '%s %a %F %u %g %N' /dir > /dev/stderr
+ chdir: /root
+ register: result_5
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - result_1.container_path == '/dir'
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == '(directory)'
+ - result_1_diff.diff.before_header == '/dir'
+ - result_1_diff.diff.after == 'Content 1\n'
+ - result_1_diff.diff.after_header == 'dynamically generated'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_2.container_path == '/dir'
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 1\n'
+ - result_3_diff.diff.before_header == '/dir'
+ - result_3_diff.diff.after == 'Content 1\n'
+ - result_3_diff.diff.after_header == 'dynamically generated'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_5.stdout | b64decode == 'Content 1\n'
+ - result_5.stderr == '10 644 regular file 0 0 /dir'
+
+######################### Modify
+
+- name: Copy content (changed, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0644
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy content (changed, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0644
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy content (changed)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0644
+ register: result_2
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_3
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == 'Content 1\n'
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 2\nExtra line'
+ - result_1_diff.diff.after_header == 'dynamically generated'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_3.stdout | b64decode == 'Content 2\nExtra line'
+ - result_3.stderr == '20 644 regular file 0 0 /file'
+
+######################### Change mode
+
+- name: Copy content (mode changed, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy content (mode changed, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy content (mode changed)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ register: result_2
+
+- name: Copy content (idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy content (idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy content (idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ register: result_4
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_5
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == 'Content 2\nExtra line'
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 2\nExtra line'
+ - result_1_diff.diff.after_header == 'dynamically generated'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 2\nExtra line'
+ - result_3_diff.diff.before_header == '/file'
+ - result_3_diff.diff.after == 'Content 2\nExtra line'
+ - result_3_diff.diff.after_header == 'dynamically generated'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_5.stdout | b64decode == 'Content 2\nExtra line'
+ - result_5.stderr == '20 707 regular file 0 0 /file'
+
+######################### Change owner and group
+
+- name: Copy content (owner/group changed, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy content (owner/group changed, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy content (owner/group changed)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ register: result_2
+
+- name: Copy content (idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy content (idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy content (idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ register: result_4
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_5
+
+- name: Copy content (owner/group changed again, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ owner_id: 13
+ group_id: 13
+ check_mode: true
+ diff: false
+ register: result_6
+
+- name: Copy content (owner/group changed again, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ owner_id: 13
+ group_id: 13
+ check_mode: true
+ diff: true
+ register: result_6_diff
+
+- name: Copy content (owner/group changed again)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |-
+ Content 2
+ Extra line
+ container_path: '/file'
+ mode: 0707
+ owner_id: 13
+ group_id: 13
+ register: result_7
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_8
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == 'Content 2\nExtra line'
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 2\nExtra line'
+ - result_1_diff.diff.after_header == 'dynamically generated'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 2\nExtra line'
+ - result_3_diff.diff.before_header == '/file'
+ - result_3_diff.diff.after == 'Content 2\nExtra line'
+ - result_3_diff.diff.after_header == 'dynamically generated'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_5.stdout | b64decode == 'Content 2\nExtra line'
+ - result_5.stderr == '20 707 regular file 12 910 /file'
+ - result_6 is changed
+ - "'diff' not in result_6"
+ - result_6_diff.diff.before == 'Content 2\nExtra line'
+ - result_6_diff.diff.before_header == '/file'
+ - result_6_diff.diff.after == 'Content 2\nExtra line'
+ - result_6_diff.diff.after_header == 'dynamically generated'
+ - result_6 == (result_6_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_7 is changed
+ - result_8.stdout | b64decode == 'Content 2\nExtra line'
+ - result_8.stderr == '20 707 regular file 13 13 /file'
+
+######################### Operate with stopped container
+
+- name: Stop container
+ docker_container:
+ name: "{{ cname }}"
+ state: stopped
+ stop_timeout: 1
+
+- name: Copy content (stopped container, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy content (stopped container, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy content (stopped container)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ register: result_2
+
+- name: Copy content (stopped container, idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy content (stopped container, idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy content (stopped container, idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ register: result_4
+
+- name: Copy content (stopped container, no owner/group provided, should fail)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ content: |
+ Content 1
+ container_path: '/file'
+ mode: 0707
+ register: result_5
+ ignore_errors: true
+
+- name: Start container
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_6
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == 'Content 2\nExtra line'
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 1\n'
+ - result_1_diff.diff.after_header == 'dynamically generated'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 1\n'
+ - result_3_diff.diff.before_header == '/file'
+ - result_3_diff.diff.after == 'Content 1\n'
+ - result_3_diff.diff.after_header == 'dynamically generated'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_5 is failed
+ - result_5.msg == ('Cannot execute command in paused container "' ~ cname ~ '"')
+ - result_6.stdout | b64decode == 'Content 1\n'
+ - result_6.stderr == '10 707 regular file 12 910 /file'
+
+################################################################################################
+# Cleanup
+
+- name: Remove container
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/tests/file.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/tests/file.yml
new file mode 100644
index 00000000..5431ae35
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_copy_into/tasks/tests/file.yml
@@ -0,0 +1,1065 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ cname: "{{ cname_prefix ~ '-f' }}"
+- name: Appending container name to cnames
+ set_fact:
+ cnames: "{{ cnames + [cname] }}"
+
+# Create container
+
+- name: Create container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command:
+ - /bin/sh
+ - "-c"
+ - >-
+ mkdir /dir;
+ ln -s file /lnk;
+ ln -s lnk3 /lnk2;
+ ln -s lnk2 /lnk1;
+ sleep 10m;
+ name: "{{ cname }}"
+ state: started
+
+# Create files
+
+- name: Create file 1
+ copy:
+ dest: '{{ remote_tmp_dir }}/file_1'
+ content: |
+ Content 1
+ mode: 0644
+
+- name: Create file 2
+ copy:
+ dest: '{{ remote_tmp_dir }}/file_2'
+ content: |-
+ Content 2
+ Extra line
+ mode: 0644
+
+- name: Create link 1
+ file:
+ dest: '{{ remote_tmp_dir }}/link_1'
+ state: link
+ src: file_1
+ follow: false
+ mode: 0644
+
+- name: Create link 2
+ file:
+ dest: '{{ remote_tmp_dir }}/link_2'
+ state: link
+ src: dead
+ force: true
+ follow: false
+ mode: 0644
+
+################################################################################################
+# Do tests
+
+######################### Copy
+
+- name: Copy file (check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy file (check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy file
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ register: result_2
+
+- name: Copy file (idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy file (idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy file (idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ register: result_4
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_5
+
+- name: Copy file (force, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ force: true
+ check_mode: true
+ diff: false
+ register: result_6
+
+- name: Copy file (force, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ force: true
+ check_mode: true
+ diff: true
+ register: result_6_diff
+
+- name: Copy file (force)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ force: true
+ register: result_7
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_8
+
+- name: Copy file (force=false, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0777
+ owner_id: 123
+ group_id: 321
+ force: false
+ check_mode: true
+ diff: false
+ register: result_9
+
+- name: Copy file (force=false, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0777
+ owner_id: 123
+ group_id: 321
+ force: false
+ check_mode: true
+ diff: true
+ register: result_9_diff
+
+- name: Copy file (force=false)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0777
+ owner_id: 123
+ group_id: 321
+ force: false
+ register: result_10
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_11
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == ''
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 1\n'
+ - result_1_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 1\n'
+ - result_3_diff.diff.before_header == '/file'
+ - result_3_diff.diff.after == 'Content 1\n'
+ - result_3_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_5.stdout | b64decode == 'Content 1\n'
+ - result_5.stderr == '10 644 regular file 0 0 /file'
+ - result_6 is changed
+ - "'diff' not in result_6"
+ - result_6_diff.diff.before == 'Content 1\n'
+ - result_6_diff.diff.before_header == '/file'
+ - result_6_diff.diff.after == 'Content 1\n'
+ - result_6_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_6 == (result_6_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_7 is changed
+ - result_8.stdout | b64decode == 'Content 1\n'
+ - result_8.stderr == '10 644 regular file 0 0 /file'
+ - result_9 is not changed
+ - "'diff' not in result_9"
+ - result_9_diff.diff.before == 'Content 1\n'
+ - result_9_diff.diff.before_header == '/file'
+ - result_9_diff.diff.after == 'Content 1\n' # note that force=false
+ - result_9_diff.diff.after_header == '/file' # note that force=false
+ - result_9 == (result_9_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_10 is not changed
+ - result_11.stdout | b64decode == 'Content 1\n'
+ - result_11.stderr == '10 644 regular file 0 0 /file'
+
+######################### Follow link - idempotence
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /lnk | base64;
+ stat -c '%s %a %F %u %g %N' /lnk > /dev/stderr;
+ chdir: /root
+ register: result_0
+
+- name: Copy file following link (check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ follow: true
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy file following link (check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ follow: true
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy file following link
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ follow: true
+ register: result_2
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /lnk | base64;
+ stat -c '%s %a %F %u %g %N' /lnk > /dev/stderr;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_3
+
+- name: Copy file following link (force, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ follow: true
+ force: true
+ check_mode: true
+ diff: false
+ register: result_4
+
+- name: Copy file following link (force, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ follow: true
+ force: true
+ check_mode: true
+ diff: true
+ register: result_4_diff
+
+- name: Copy file following link (force)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ follow: true
+ force: true
+ register: result_5
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /lnk | base64;
+ stat -c '%s %a %F %u %g %N' /lnk > /dev/stderr;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_6
+
+- name: Check results
+ assert:
+ that:
+ - result_0.stdout | b64decode == 'Content 1\n'
+ - result_0.stderr == "4 777 symbolic link 0 0 '/lnk' -> 'file'"
+ - result_1 is not changed
+ - result_1.container_path == '/file'
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == 'Content 1\n'
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 1\n'
+ - result_1_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is not changed
+ - result_2.container_path == '/file'
+ - result_3.stdout | b64decode == 'Content 1\n'
+ - result_3.stderr_lines[0] == "4 777 symbolic link 0 0 '/lnk' -> 'file'"
+ - result_3.stderr_lines[1] == '10 644 regular file 0 0 /file'
+ - result_4 is changed
+ - result_4.container_path == '/file'
+ - "'diff' not in result_4"
+ - result_4_diff.diff.before == 'Content 1\n'
+ - result_4_diff.diff.before_header == '/file'
+ - result_4_diff.diff.after == 'Content 1\n'
+ - result_4_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_4 == (result_4_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_5 is changed
+ - result_5.container_path == '/file'
+ - result_6.stdout | b64decode == 'Content 1\n'
+ - result_6.stderr_lines[0] == "4 777 symbolic link 0 0 '/lnk' -> 'file'"
+ - result_6.stderr_lines[1] == '10 644 regular file 0 0 /file'
+
+######################### Do not follow link - replace by file
+
+- name: Copy file not following link (check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ follow: false
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy file not following link (check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ follow: false
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy file not following link
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ follow: false
+ register: result_2
+
+- name: Copy file not following link (idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy file not following link (idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy file not following link (idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ register: result_4
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /lnk | base64;
+ stat -c '%s %a %F %u %g %N' /lnk > /dev/stderr
+ chdir: /root
+ register: result_5
+
+- name: Copy file not following link (force, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ force: true
+ check_mode: true
+ diff: false
+ register: result_6
+
+- name: Copy file not following link (force, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ force: true
+ check_mode: true
+ diff: true
+ register: result_6_diff
+
+- name: Copy file not following link (force)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/lnk'
+ force: true
+ register: result_7
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /lnk | base64;
+ stat -c '%s %a %F %u %g %N' /lnk > /dev/stderr
+ chdir: /root
+ register: result_8
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - result_1.container_path == '/lnk'
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == '/file'
+ - result_1_diff.diff.before_header == '/lnk'
+ - result_1_diff.diff.after == 'Content 1\n'
+ - result_1_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_2.container_path == '/lnk'
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 1\n'
+ - result_3_diff.diff.before_header == '/lnk'
+ - result_3_diff.diff.after == 'Content 1\n'
+ - result_3_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_5.stdout | b64decode == 'Content 1\n'
+ - result_5.stderr == '10 644 regular file 0 0 /lnk'
+ - result_6 is changed
+ - result_6.container_path == '/lnk'
+ - "'diff' not in result_6"
+ - result_6_diff.diff.before == 'Content 1\n'
+ - result_6_diff.diff.before_header == '/lnk'
+ - result_6_diff.diff.after == 'Content 1\n'
+ - result_6_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_6 == (result_6_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_7 is changed
+ - result_7.container_path == '/lnk'
+ - result_8.stdout | b64decode == 'Content 1\n'
+ - result_8.stderr == '10 644 regular file 0 0 /lnk'
+
+######################### Replace directory by file
+
+- name: Copy file to replace directory (check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/dir'
+ follow: false
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy file to replace directory (check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/dir'
+ follow: false
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy file to replace directory
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/dir'
+ follow: false
+ register: result_2
+
+- name: Copy file to replace directory (idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/dir'
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy file to replace directory (idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/dir'
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy file to replace directory (idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/dir'
+ register: result_4
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /dir | base64;
+ stat -c '%s %a %F %u %g %N' /dir > /dev/stderr
+ chdir: /root
+ register: result_5
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - result_1.container_path == '/dir'
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == '(directory)'
+ - result_1_diff.diff.before_header == '/dir'
+ - result_1_diff.diff.after == 'Content 1\n'
+ - result_1_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_2.container_path == '/dir'
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 1\n'
+ - result_3_diff.diff.before_header == '/dir'
+ - result_3_diff.diff.after == 'Content 1\n'
+ - result_3_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_5.stdout | b64decode == 'Content 1\n'
+ - result_5.stderr == '10 644 regular file 0 0 /dir'
+
+######################### Modify
+
+- name: Copy file (changed, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy file (changed, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy file (changed)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ register: result_2
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_3
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == 'Content 1\n'
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 2\nExtra line'
+ - result_1_diff.diff.after_header == remote_tmp_dir ~ '/file_2'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_3.stdout | b64decode == 'Content 2\nExtra line'
+ - result_3.stderr == '20 644 regular file 0 0 /file'
+
+######################### Change mode
+
+- name: Copy file (mode changed, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy file (mode changed, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy file (mode changed)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ register: result_2
+
+- name: Copy file (idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy file (idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy file (idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ register: result_4
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_5
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == 'Content 2\nExtra line'
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 2\nExtra line'
+ - result_1_diff.diff.after_header == remote_tmp_dir ~ '/file_2'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 2\nExtra line'
+ - result_3_diff.diff.before_header == '/file'
+ - result_3_diff.diff.after == 'Content 2\nExtra line'
+ - result_3_diff.diff.after_header == remote_tmp_dir ~ '/file_2'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_5.stdout | b64decode == 'Content 2\nExtra line'
+ - result_5.stderr == '20 707 regular file 0 0 /file'
+
+######################### Change owner and group
+
+- name: Copy file (owner/group changed, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy file (owner/group changed, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy file (owner/group changed)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ register: result_2
+
+- name: Copy file (idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy file (idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy file (idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ register: result_4
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_5
+
+- name: Copy file (owner/group changed again, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 13
+ group_id: 13
+ check_mode: true
+ diff: false
+ register: result_6
+
+- name: Copy file (owner/group changed again, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 13
+ group_id: 13
+ check_mode: true
+ diff: true
+ register: result_6_diff
+
+- name: Copy file (owner/group changed again)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_2'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 13
+ group_id: 13
+ register: result_7
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_8
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == 'Content 2\nExtra line'
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 2\nExtra line'
+ - result_1_diff.diff.after_header == remote_tmp_dir ~ '/file_2'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 2\nExtra line'
+ - result_3_diff.diff.before_header == '/file'
+ - result_3_diff.diff.after == 'Content 2\nExtra line'
+ - result_3_diff.diff.after_header == remote_tmp_dir ~ '/file_2'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_5.stdout | b64decode == 'Content 2\nExtra line'
+ - result_5.stderr == '20 707 regular file 12 910 /file'
+ - result_6 is changed
+ - "'diff' not in result_6"
+ - result_6_diff.diff.before == 'Content 2\nExtra line'
+ - result_6_diff.diff.before_header == '/file'
+ - result_6_diff.diff.after == 'Content 2\nExtra line'
+ - result_6_diff.diff.after_header == remote_tmp_dir ~ '/file_2'
+ - result_6 == (result_6_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_7 is changed
+ - result_8.stdout | b64decode == 'Content 2\nExtra line'
+ - result_8.stderr == '20 707 regular file 13 13 /file'
+
+######################### Operate with stopped container
+
+- name: Stop container
+ docker_container:
+ name: "{{ cname }}"
+ state: stopped
+ stop_timeout: 1
+
+- name: Copy file (stopped container, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: false
+ register: result_1
+
+- name: Copy file (stopped container, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: true
+ register: result_1_diff
+
+- name: Copy file (stopped container)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ register: result_2
+
+- name: Copy file (stopped container, idempotent, check mode)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: false
+ register: result_3
+
+- name: Copy file (stopped container, idempotent, check mode, diff)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ check_mode: true
+ diff: true
+ register: result_3_diff
+
+- name: Copy file (stopped container, idempotent)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ mode: 0707
+ owner_id: 12
+ group_id: 910
+ register: result_4
+
+- name: Copy file (stopped container, no owner/group provided, should fail)
+ docker_container_copy_into:
+ container: '{{ cname }}'
+ path: '{{ remote_tmp_dir }}/file_1'
+ container_path: '/file'
+ mode: 0707
+ register: result_5
+ ignore_errors: true
+
+- name: Start container
+ docker_container:
+ name: "{{ cname }}"
+ state: started
+
+- name: Dump file
+ docker_container_exec:
+ container: '{{ cname }}'
+ argv:
+ - /bin/sh
+ - "-c"
+ - >-
+ cat /file | base64;
+ stat -c '%s %a %F %u %g %N' /file > /dev/stderr
+ chdir: /root
+ register: result_6
+
+- name: Check results
+ assert:
+ that:
+ - result_1 is changed
+ - "'diff' not in result_1"
+ - result_1_diff.diff.before == 'Content 2\nExtra line'
+ - result_1_diff.diff.before_header == '/file'
+ - result_1_diff.diff.after == 'Content 1\n'
+ - result_1_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_1 == (result_1_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_2 is changed
+ - result_3 is not changed
+ - "'diff' not in result_3"
+ - result_3_diff.diff.before == 'Content 1\n'
+ - result_3_diff.diff.before_header == '/file'
+ - result_3_diff.diff.after == 'Content 1\n'
+ - result_3_diff.diff.after_header == remote_tmp_dir ~ '/file_1'
+ - result_3 == (result_3_diff | dict2items | rejectattr('key', 'eq', 'diff') | items2dict)
+ - result_4 is not changed
+ - result_5 is failed
+ - result_5.msg == ('Cannot execute command in paused container "' ~ cname ~ '"')
+ - result_6.stdout | b64decode == 'Content 1\n'
+ - result_6.stderr == '10 707 regular file 12 910 /file'
+
+################################################################################################
+# Cleanup
+
+- name: Remove container
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_exec/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_container_exec/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_exec/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_exec/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container_exec/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_exec/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_exec/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container_exec/tasks/main.yml
new file mode 100644
index 00000000..61c3b81e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_exec/tasks/main.yml
@@ -0,0 +1,228 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Create random container name
+ set_fact:
+ cname: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+
+ - name: Make sure container is not there
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+
+ - name: Execute in a non-present container
+ docker_container_exec:
+ container: "{{ cname }}"
+ command: "/bin/bash -c 'ls -a'"
+ register: result
+ ignore_errors: true
+
+ - assert:
+ that:
+ - result is failed
+ - "'Could not find container' in result.msg"
+
+ - name: Make sure container exists
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ force_kill: true
+
+ - name: Execute in a present container (command)
+ docker_container_exec:
+ container: "{{ cname }}"
+ command: "/bin/sh -c 'ls -a'"
+ register: result_cmd
+
+ - assert:
+ that:
+ - result_cmd.rc == 0
+ - "'stdout' in result_cmd"
+ - "'stdout_lines' in result_cmd"
+ - "'stderr' in result_cmd"
+ - "'stderr_lines' in result_cmd"
+
+ - name: Execute in a present container (argv)
+ docker_container_exec:
+ container: "{{ cname }}"
+ argv:
+ - /bin/sh
+ - '-c'
+ - ls -a
+ register: result_argv
+
+ - assert:
+ that:
+ - result_argv.rc == 0
+ - "'stdout' in result_argv"
+ - "'stdout_lines' in result_argv"
+ - "'stderr' in result_argv"
+ - "'stderr_lines' in result_argv"
+ - result_cmd.stdout == result_argv.stdout
+
+ - name: Execute in a present container (cat without stdin)
+ docker_container_exec:
+ container: "{{ cname }}"
+ argv:
+ - /bin/sh
+ - '-c'
+ - cat
+ register: result
+
+ - assert:
+ that:
+ - result.rc == 0
+ - result.stdout == ''
+ - result.stdout_lines == []
+ - result.stderr == ''
+ - result.stderr_lines == []
+
+ - name: Execute in a present container (cat with stdin)
+ docker_container_exec:
+ container: "{{ cname }}"
+ argv:
+ - /bin/sh
+ - '-c'
+ - cat
+ stdin: Hello world!
+ strip_empty_ends: false
+ register: result
+
+ - assert:
+ that:
+ - result.rc == 0
+ - result.stdout == 'Hello world!\n'
+ - result.stdout_lines == ['Hello world!']
+ - result.stderr == ''
+ - result.stderr_lines == []
+
+ - name: Execute in a present container (cat with stdin, no newline)
+ docker_container_exec:
+ container: "{{ cname }}"
+ argv:
+ - /bin/sh
+ - '-c'
+ - cat
+ stdin: Hello world!
+ stdin_add_newline: false
+ strip_empty_ends: false
+ register: result
+
+ - assert:
+ that:
+ - result.rc == 0
+ - result.stdout == 'Hello world!'
+ - result.stdout_lines == ['Hello world!']
+ - result.stderr == ''
+ - result.stderr_lines == []
+
+ - name: Execute in a present container (cat with stdin, newline but stripping)
+ docker_container_exec:
+ container: "{{ cname }}"
+ argv:
+ - /bin/sh
+ - '-c'
+ - cat
+ stdin: Hello world!
+ stdin_add_newline: true
+ strip_empty_ends: true
+ register: result
+
+ - assert:
+ that:
+ - result.rc == 0
+ - result.stdout == 'Hello world!'
+ - result.stdout_lines == ['Hello world!']
+ - result.stderr == ''
+ - result.stderr_lines == []
+
+ - name: Prepare long string
+ set_fact:
+ very_long_string: "{{ 'something long ' * 10000 }}"
+ very_long_string2: "{{ 'something else ' * 5000 }}"
+
+ - name: Execute in a present container (long stdin)
+ docker_container_exec:
+ container: "{{ cname }}"
+ argv:
+ - /bin/sh
+ - '-c'
+ - cat
+ stdin: |-
+ {{ very_long_string }}
+ {{ very_long_string2 }}
+ register: result
+
+ - assert:
+ that:
+ - result is changed
+ - result.rc == 0
+ - result.stdout == very_long_string ~ '\n' ~ very_long_string2
+ - result.stdout_lines == [very_long_string, very_long_string2]
+ - result.stderr == ''
+ - result.stderr_lines == []
+ - "'exec_id' not in result"
+
+ - name: Execute in a present container (detached)
+ docker_container_exec:
+ container: "{{ cname }}"
+ argv:
+ - /bin/sh
+ - '-c'
+ - sleep 1m
+ detach: true
+ register: result
+
+ - debug: var=result
+
+ - assert:
+ that:
+ - result is changed
+ - "'rc' not in result"
+ - "'stdout' not in result"
+ - "'stderr' not in result"
+ - result.exec_id is string
+
+ - name: Execute in a present container (environment variable)
+ docker_container_exec:
+ container: "{{ cname }}"
+ argv:
+ - /bin/sh
+ - '-c'
+ - 'echo "$FOO" ; echo $FOO > /dev/stderr'
+ env:
+ FOO: |-
+ bar
+ baz
+ register: result
+
+ - assert:
+ that:
+ - result.rc == 0
+ - result.stdout == 'bar\nbaz'
+ - result.stdout_lines == ['bar', 'baz']
+ - result.stderr == 'bar baz'
+ - result.stderr_lines == ['bar baz']
+
+ always:
+ - name: Cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_container_exec tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_info/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_container_info/aliases
new file mode 100644
index 00000000..0837c740
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/5
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_info/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container_info/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_info/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_container_info/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_container_info/tasks/main.yml
new file mode 100644
index 00000000..2df597eb
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_container_info/tasks/main.yml
@@ -0,0 +1,84 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Create random container name
+ set_fact:
+ cname: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+
+ - name: Make sure container is not there
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+
+ - name: Inspect a non-present container
+ docker_container_info:
+ name: "{{ cname }}"
+ register: result
+
+ - assert:
+ that:
+ - "not result.exists"
+ - "'container' in result"
+ - "result.container is none"
+
+ - name: Make sure container exists
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ state: started
+ force_kill: true
+
+ - name: Inspect a present container
+ docker_container_info:
+ name: "{{ cname }}"
+ register: result
+ - name: Dump docker_container_info result
+ debug: var=result
+
+ - name: "Comparison: use 'docker inspect'"
+ command: docker inspect "{{ cname }}"
+ register: docker_inspect
+ ignore_errors: true
+ - block:
+ - set_fact:
+ docker_inspect_result: "{{ docker_inspect.stdout | from_json }}"
+ - name: Dump docker inspect result
+ debug: var=docker_inspect_result
+ when: docker_inspect is not failed
+
+ - assert:
+ that:
+ - result.exists
+ - "'container' in result"
+ - "result.container"
+
+ - assert:
+ that:
+ - "result.container == docker_inspect_result[0]"
+ when: docker_inspect is not failed
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in docker_inspect.stderr"
+ when: docker_inspect is failed
+
+ always:
+ - name: Cleanup
+ docker_container:
+ name: "{{ cname }}"
+ state: absent
+ force_kill: true
+
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_container_info tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_host_info/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_host_info/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_host_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_host_info/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_host_info/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_host_info/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/main.yml
new file mode 100644
index 00000000..e26790f3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_host_info.yml
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_host_info tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/test_host_info.yml b/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/test_host_info.yml
new file mode 100644
index 00000000..0d090db9
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_host_info/tasks/test_host_info.yml
@@ -0,0 +1,364 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create random container/volume name
+ set_fact:
+ cname: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+ cname2: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+ vname: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+
+- debug:
+ msg: "Using container names '{{ cname }}' and '{{ cname2 }}', and volume name '{{ vname }}'"
+
+- block:
+ - name: Get info on Docker host
+ docker_host_info:
+ register: output
+
+ - name: assert reading docker host facts when docker is running
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+
+# Container and volume are created so that all lists are non-empty:
+# * container and volume lists are non-empty because of the created objects;
+# * image list is non-empty because the image of the container is there;
+# * network list is always non-empty (default networks).
+ - name: Create running container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -c "sleep 10m"'
+ name: "{{ cname }}"
+ labels:
+ key1: value1
+ key2: value2
+ state: started
+ register: container_output
+
+  - name: Create stopped container
+ docker_container:
+ image: "{{ docker_test_image_alpine }}"
+ name: "{{ cname2 }}"
+ labels:
+ key2: value2
+ key3: value3
+ state: stopped
+ register: container2_output
+
+ - assert:
+ that:
+ - container_output is changed
+ - container2_output is changed
+
+ - name: Create a volume
+ docker_volume:
+ name: "{{ vname }}"
+ register: volume_output
+
+ - assert:
+ that:
+ - volume_output is changed
+
+ - name: Get info on Docker host and list containers
+ docker_host_info:
+ containers: true
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list containers
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+ - 'output.containers[0].Image is string'
+ - 'output.containers[0].ImageID is not defined'
+
+ - name: Get info on Docker host and list containers matching filters (single label)
+ docker_host_info:
+ containers: true
+ containers_filters:
+ label: key1=value1
+ register: output
+
+ - name: assert container is returned when filters are matched (single label)
+ assert:
+ that: "{{ output.containers | length }} == 1"
+
+ - name: Get info on Docker host and list containers matching filters (multiple labels)
+ docker_host_info:
+ containers: true
+ containers_filters:
+ label:
+ - key1=value1
+ - key2=value2
+ register: output
+
+ - name: assert container is returned when filters are matched (multiple labels)
+ assert:
+ that: "{{ output.containers | length }} == 1"
+
+ - name: Get info on Docker host and do not list containers which do not match filters
+ docker_host_info:
+ containers: true
+ containers_filters:
+ label:
+ - key1=value1
+ - key2=value2
+ - key3=value3
+ register: output
+
+ - name: assert no container is returned when filters are not matched
+ assert:
+ that: "{{ output.containers | length }} == 0"
+
+ - name: Get info on Docker host and list containers matching filters (single label, not all containers)
+ docker_host_info:
+ containers: true
+ containers_all: false
+ containers_filters:
+ label: key2=value2
+ register: output
+
+ - name: Get info on Docker host and list containers matching filters (single label, all containers)
+ docker_host_info:
+ containers: true
+ containers_all: true
+ containers_filters:
+ label: key2=value2
+ register: output_all
+
+  - name: assert that one container resp. two containers are returned
+ assert:
+ that:
+ - "{{ output.containers | length }} == 1"
+ - "{{ output_all.containers | length }} == 2"
+
+ - name: Get info on Docker host and list containers with verbose output
+ docker_host_info:
+ containers: true
+ verbose_output: true
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list containers with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+ - 'output.containers[0].Image is string'
+ - 'output.containers[0].ImageID is string'
+
+ - name: Get info on Docker host and list images
+ docker_host_info:
+ images: true
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list images
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images[0].Id is string'
+ - 'output.images[0].ParentId is not defined'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and list images with verbose output
+ docker_host_info:
+ images: true
+ verbose_output: true
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list images with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images[0].Id is string'
+ - 'output.images[0].ParentId is string'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and list networks
+ docker_host_info:
+ networks: true
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list networks
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks[0].Id is string'
+ - 'output.networks[0].Created is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and list networks with verbose output
+ docker_host_info:
+ networks: true
+ verbose_output: true
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list networks with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks[0].Id is string'
+ - 'output.networks[0].Created is string'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and list volumes
+ docker_host_info:
+ volumes: true
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list volumes
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes[0].Name is string'
+ - 'output.volumes[0].Mountpoint is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and list volumes with verbose output
+ docker_host_info:
+ volumes: true
+ verbose_output: true
+ register: output
+
+ - name: assert reading docker host facts when docker is running and list volumes with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes[0].Name is string'
+ - 'output.volumes[0].Mountpoint is string'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+
+ - name: Get info on Docker host and get disk usage
+ docker_host_info:
+ disk_usage: true
+ register: output
+
+ - name: assert reading docker host facts when docker is running and get disk usage
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage.LayersSize is number'
+ - 'output.disk_usage.Images is not defined'
+ - 'output.disk_usage.Containers is not defined'
+ - 'output.disk_usage.Volumes is not defined'
+
+ - name: Get info on Docker host and get disk usage with verbose output
+ docker_host_info:
+ disk_usage: true
+ verbose_output: true
+ register: output
+
+ - name: assert reading docker host facts when docker is running and get disk usage with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage.LayersSize is number'
+ - 'output.disk_usage.Images is sequence'
+ - 'output.disk_usage.Containers is sequence'
+ - 'output.disk_usage.Volumes is sequence'
+
+ - name: Get info on Docker host, disk usage and get all lists together
+ docker_host_info:
+ volumes: true
+ containers: true
+ networks: true
+ images: true
+ disk_usage: true
+ register: output
+
+ - name: assert reading docker host facts when docker is running, disk usage and get lists together
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers[0].Image is string'
+ - 'output.containers[0].ImageID is not defined'
+ - 'output.networks[0].Id is string'
+ - 'output.networks[0].Created is not defined'
+ - 'output.volumes[0].Name is string'
+ - 'output.volumes[0].Mountpoint is not defined'
+ - 'output.images[0].Id is string'
+ - 'output.images[0].ParentId is not defined'
+ - 'output.disk_usage.LayersSize is number'
+ - 'output.disk_usage.Images is not defined'
+ - 'output.disk_usage.Containers is not defined'
+ - 'output.disk_usage.Volumes is not defined'
+
+ - name: Get info on Docker host, disk usage and get all lists together with verbose output
+ docker_host_info:
+ volumes: true
+ containers: true
+ networks: true
+ images: true
+ disk_usage: true
+ verbose_output: true
+ register: output
+
+  - name: assert reading docker host facts when docker is running, disk usage and get all lists together with verbose output
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers[0].Image is string'
+ - 'output.containers[0].ImageID is string'
+ - 'output.networks[0].Id is string'
+ - 'output.networks[0].Created is string'
+ - 'output.volumes[0].Name is string'
+ - 'output.volumes[0].Mountpoint is string'
+ - 'output.images[0].Id is string'
+ - 'output.images[0].ParentId is string'
+ - 'output.disk_usage.LayersSize is number'
+ - 'output.disk_usage.Images is sequence'
+ - 'output.disk_usage.Containers is sequence'
+ - 'output.disk_usage.Volumes is sequence'
+
+ always:
+ - name: Delete containers
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: true
+ loop:
+ - "{{ cname }}"
+ - "{{ cname2 }}"
+
+ - name: Delete volume
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_image/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image/meta/main.yml
new file mode 100644
index 00000000..f7ba9ab1
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker_registry
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/main.yml
new file mode 100644
index 00000000..88b23cfe
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ include_tasks:
+ file: test.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/run-test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/run-test.yml
new file mode 100644
index 00000000..65853ddd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/run-test.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/test.yml
new file mode 100644
index 00000000..50bb84ff
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/test.yml
@@ -0,0 +1,54 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create random name prefix
+ set_fact:
+ name_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+- name: Create image and container list
+ set_fact:
+ inames: []
+ cnames: []
+
+- debug:
+ msg: "Using name prefix {{ name_prefix }}"
+
+- name: Create files directory
+ file:
+ path: '{{ remote_tmp_dir }}/files'
+ state: directory
+
+- name: Template files
+ template:
+ src: '{{ item }}'
+ dest: '{{ remote_tmp_dir }}/files/{{ item }}'
+ loop:
+ - ArgsDockerfile
+ - Dockerfile
+ - EtcHostsDockerfile
+ - MyDockerfile
+ - StagedDockerfile
+
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure all images are removed"
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ inames }}"
+ - name: "Make sure all containers are removed"
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: true
+ with_items: "{{ cnames }}"
+
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_image tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/basic.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/basic.yml
new file mode 100644
index 00000000..78b4f773
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/basic.yml
@@ -0,0 +1,139 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+## basic ###########################################################
+####################################################################
+
+- name: Make sure image is not there
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ state: absent
+ force_absent: true
+ register: absent_1
+
+- name: Make sure image is not there (idempotency)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ state: absent
+ register: absent_2
+
+- assert:
+ that:
+ - absent_2 is not changed
+
+- name: Make sure image is there
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ state: present
+ source: pull
+ pull:
+ platform: amd64
+ register: present_1
+
+- name: Make sure image is there (idempotent)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ state: present
+ source: pull
+ pull:
+ platform: amd64
+ register: present_2
+
+- assert:
+ that:
+ - present_1 is changed
+ - present_2 is not changed
+
+- name: Make sure tag is not there
+ docker_image:
+ name: "{{ docker_test_image_hello_world_base }}:alias"
+ state: absent
+
+- name: Tag image with alias
+ docker_image:
+ source: local
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ docker_test_image_hello_world_base }}:alias"
+ register: tag_1
+
+- name: Tag image with alias (idempotent)
+ docker_image:
+ source: local
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ docker_test_image_hello_world_base }}:alias"
+ register: tag_2
+
+- name: Tag image with alias (force, still idempotent)
+ docker_image:
+ source: local
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ docker_test_image_hello_world_base }}:alias"
+ force_tag: true
+ register: tag_3
+
+- name: Tag image with ID instead of name
+ docker_image:
+ source: local
+ name: "{{ present_1.image.Id }}"
+ repository: "{{ docker_test_image_hello_world_base }}:alias"
+ register: tag_4
+
+- assert:
+ that:
+ - tag_1 is changed
+ - tag_2 is not changed
+ - tag_3 is not changed
+ - tag_4 is not changed
+
+- name: Cleanup alias tag
+ docker_image:
+ name: "{{ docker_test_image_hello_world_base }}:alias"
+ state: absent
+
+- name: Tag image with ID instead of name (use ID for repository, must fail)
+ docker_image:
+ source: local
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ present_1.image.Id }}"
+ register: fail_1
+ ignore_errors: true
+
+- name: Push image with ID (must fail)
+ docker_image:
+ source: local
+ name: "{{ present_1.image.Id }}"
+ push: true
+ register: fail_2
+ ignore_errors: true
+
+- name: Pull image ID (must fail)
+ docker_image:
+ source: pull
+ name: "{{ present_1.image.Id }}"
+ force_source: true
+ register: fail_3
+ ignore_errors: true
+
+- name: Build image with ID instead of name (must fail)
+ docker_image:
+ source: build
+ name: "{{ present_1.image.Id }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ force_source: true
+ register: fail_4
+ ignore_errors: true
+
+- assert:
+ that:
+ - fail_1 is failed
+ - "'`repository` must not be an image ID' in fail_1.msg"
+ - fail_2 is failed
+ - "'Cannot push an image by ID' in fail_2.msg"
+ - fail_3 is failed
+ - "'Image name must not be an image ID for source=pull' in fail_3.msg"
+ - fail_4 is failed
+ - "'Image name must not be an image ID for source=build' in fail_4.msg"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/docker_image.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/docker_image.yml
new file mode 100644
index 00000000..a13eb691
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/docker_image.yml
@@ -0,0 +1,259 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering image name
+ set_fact:
+ iname: "{{ name_prefix ~ '-options' }}"
+
+- name: Determining pushed image names
+ set_fact:
+ hello_world_image_base: "{{ registry_address }}/test/hello-world"
+ test_image_base: "{{ registry_address }}/test/{{ iname }}"
+
+- name: Registering image name
+ set_fact:
+ inames: "{{ inames + [iname, test_image_base ~ ':latest', test_image_base ~ ':other', hello_world_image_base ~ ':latest', hello_world_image_base ~ ':newtag', hello_world_image_base ~ ':newtag2'] }}"
+
+####################################################################
+## interact with test registry #####################################
+####################################################################
+
+- name: Make sure image is not there
+ docker_image:
+ name: "{{ hello_world_image_base }}:latest"
+ state: absent
+ force_absent: true
+
+- name: Make sure we have {{ docker_test_image_hello_world }}
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ source: pull
+
+- name: Push image to test registry
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ hello_world_image_base }}:latest"
+ push: true
+ source: local
+ register: push_1
+
+- name: Push image to test registry (idempotent)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ hello_world_image_base }}:latest"
+ push: true
+ source: local
+ register: push_2
+
+- name: Push image to test registry (force, still idempotent)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ hello_world_image_base }}:latest"
+ push: true
+ source: local
+ force_tag: true
+ register: push_3
+
+- assert:
+ that:
+ - push_1 is changed
+ - push_2 is not changed
+ - push_3 is not changed
+
+- name: Get facts of local image
+ docker_image_info:
+ name: "{{ hello_world_image_base }}:latest"
+ register: facts_1
+
+- name: Make sure image is not there
+ docker_image:
+ name: "{{ hello_world_image_base }}:latest"
+ state: absent
+ force_absent: true
+
+- name: Get facts of local image (absent)
+ docker_image_info:
+ name: "{{ hello_world_image_base }}:latest"
+ register: facts_2
+
+- name: Pull image from test registry
+ docker_image:
+ name: "{{ hello_world_image_base }}:latest"
+ state: present
+ source: pull
+ register: pull_1
+
+- name: Pull image from test registry (idempotency)
+ docker_image:
+ name: "{{ hello_world_image_base }}:latest"
+ state: present
+ source: pull
+ register: pull_2
+
+- name: Get facts of local image (present)
+ docker_image_info:
+ name: "{{ hello_world_image_base }}:latest"
+ register: facts_3
+
+- assert:
+ that:
+ - pull_1 is changed
+ - pull_2 is not changed
+ - facts_1.images | length == 1
+ - facts_2.images | length == 0
+ - facts_3.images | length == 1
+
+- name: Pull image from test registry (with digest)
+ docker_image:
+ name: "{{ facts_3.images[0].RepoDigests[0] }}"
+ state: present
+ source: pull
+ force_source: true
+ register: pull_digest
+
+- name: Make sure that changed is still false
+ assert:
+ that:
+ - pull_digest is not changed
+
+- name: Tag different image with new tag
+ docker_image:
+ name: "{{ docker_test_image_alpine_different }}"
+ repository: "{{ hello_world_image_base }}:newtag"
+ push: false
+ source: pull
+
+- name: Push different image with new tag
+ docker_image:
+ name: "{{ hello_world_image_base }}"
+ repository: "{{ hello_world_image_base }}"
+ tag: newtag
+ push: true
+ source: local
+ register: push_1_different
+
+- name: Push different image with new tag (idempotent)
+ docker_image:
+ name: "{{ hello_world_image_base }}"
+ repository: "{{ hello_world_image_base }}"
+ tag: newtag
+ push: true
+ source: local
+ register: push_2_different
+
+- assert:
+ that:
+ - push_1_different is changed
+ - push_2_different is not changed
+
+- name: Tag same image with new tag
+ docker_image:
+ name: "{{ docker_test_image_alpine_different }}"
+ repository: "{{ hello_world_image_base }}:newtag2"
+ push: false
+ source: pull
+
+- name: Push same image with new tag
+ docker_image:
+ name: "{{ hello_world_image_base }}"
+ repository: "{{ hello_world_image_base }}"
+ tag: newtag2
+ push: true
+ source: local
+ register: push_1_same
+
+- name: Push same image with new tag (idempotent)
+ docker_image:
+ name: "{{ hello_world_image_base }}"
+ repository: "{{ hello_world_image_base }}"
+ tag: newtag2
+ push: true
+ source: local
+ register: push_2_same
+
+- assert:
+ that:
+ # NOTE: This should be:
+ # - push_1_same is changed
+ # Unfortunately docker does *NOT* report whether the tag already existed or not.
+ # Here are the logs returned by client.push() for both tasks (which are exactly the same):
+ # push_1_same:
+ # {"status": "The push refers to repository [localhost:32796/test/hello-world]"},
+ # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Preparing"},
+ # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Layer already exists"},
+ # {"status": "newtag2: digest: sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b size: 528"},
+ # {"aux": {"Digest": "sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b", "Size": 528, "Tag": "newtag2"}, "progressDetail": {}}
+ # push_2_same:
+ # {"status": "The push refers to repository [localhost:32796/test/hello-world]"},
+ # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Preparing"},
+ # {"id": "3fc64803ca2d", "progressDetail": {}, "status": "Layer already exists"},
+ # {"status": "newtag2: digest: sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b size: 528"},
+ # {"aux": {"Digest": "sha256:92251458088c638061cda8fd8b403b76d661a4dc6b7ee71b6affcf1872557b2b", "Size": 528, "Tag": "newtag2"}, "progressDetail": {}}
+ - push_1_same is not changed
+ - push_2_same is not changed
+
+####################################################################
+## repository ######################################################
+####################################################################
+
+- name: Make sure image is not there
+ docker_image:
+ name: "{{ test_image_base }}:latest"
+ state: absent
+ force_absent: true
+
+- name: repository
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ pull: false
+ repository: "{{ test_image_base }}"
+ source: build
+ register: repository_1
+
+- name: repository (idempotent)
+ docker_image:
+ name: "{{ iname }}"
+ repository: "{{ test_image_base }}"
+ source: local
+ register: repository_2
+
+- name: repository, tag with ID
+ docker_image:
+ name: "{{ repository_1.image.Id }}"
+ repository: "{{ test_image_base }}:other"
+ source: local
+ register: repository_3
+
+- name: repository, tag with ID (idempotent)
+ docker_image:
+ name: "{{ repository_1.image.Id }}"
+ repository: "{{ test_image_base }}:other"
+ source: local
+ force_tag: true
+ register: repository_4
+
+- assert:
+ that:
+ - repository_1 is changed
+ - repository_2 is not changed
+ - repository_3 is changed
+ - repository_4 is not changed
+
+- name: Get facts of image
+ docker_image_info:
+ name: "{{ test_image_base }}:latest"
+ register: facts_1
+
+- name: cleanup
+ docker_image:
+ name: "{{ test_image_base }}:latest"
+ state: absent
+ force_absent: true
+
+- assert:
+ that:
+ - facts_1.images | length == 1
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/options.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/options.yml
new file mode 100644
index 00000000..0670f133
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/tasks/tests/options.yml
@@ -0,0 +1,446 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering image name
+ set_fact:
+ iname: "{{ name_prefix ~ '-options' }}"
+ iname_1: "{{ name_prefix ~ '-options-1' }}"
+ hello_world_alt: "{{ name_prefix }}-hello-world-alt:v1.2.3-foo"
+
+- name: Registering image name
+ set_fact:
+ inames: "{{ inames + [iname, iname_1, hello_world_alt] }}"
+
+####################################################################
+## build.args ######################################################
+####################################################################
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: true
+
+- name: buildargs
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ dockerfile: "ArgsDockerfile"
+ args:
+ IMAGE: "{{ docker_test_image_busybox }}"
+ TEST1: val1
+ TEST2: val2
+ TEST3: "True"
+ pull: false
+ source: build
+ register: buildargs_1
+ ignore_errors: true
+
+- name: buildargs (idempotency)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ dockerfile: "ArgsDockerfile"
+ args:
+ IMAGE: "{{ docker_test_image_busybox }}"
+ TEST1: val1
+ TEST2: val2
+ TEST3: "True"
+ pull: false
+ source: build
+ register: buildargs_2
+ ignore_errors: true
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: true
+
+- assert:
+ that:
+ - buildargs_1 is changed
+ - buildargs_2 is not failed and buildargs_2 is not changed
+
+####################################################################
+## build.container_limits ##########################################
+####################################################################
+
+- name: container_limits (Failed due to min memory limit)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ container_limits:
+ memory: 4000
+ pull: false
+ source: build
+ ignore_errors: true
+ register: container_limits_1
+
+- name: container_limits
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ container_limits:
+ memory: 7000000
+ memswap: 8000000
+ pull: false
+ source: build
+ register: container_limits_2
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: true
+
+- assert:
+ that:
+ # It *sometimes* happens that the first task does not fail.
+ # For now, we work around this by
+ # a) requiring that if it fails, the message must
+ # contain 'Minimum memory limit allowed is (4|6)MB', and
+ # b) requiring that either the first task, or the second
+ # task is changed, but not both.
+ - "not container_limits_1 is failed or ('Minimum memory limit allowed is ') in container_limits_1.msg"
+ - "container_limits_1 is changed or container_limits_2 is changed and not (container_limits_1 is changed and container_limits_2 is changed)"
+
+####################################################################
+## build.dockerfile ################################################
+####################################################################
+
+- name: dockerfile
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ dockerfile: "MyDockerfile"
+ pull: false
+ source: build
+ register: dockerfile_1
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: true
+
+- assert:
+ that:
+ - dockerfile_1 is changed
+ - "('FROM ' ~ docker_test_image_alpine) in dockerfile_1.stdout"
+ - dockerfile_1['image']['Config']['WorkingDir'] == '/newdata'
+
+####################################################################
+## build.platform ##################################################
+####################################################################
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: true
+
+- name: build.platform
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ platform: linux
+ pull: false
+ source: build
+ register: platform_1
+ ignore_errors: true
+
+- name: build.platform (idempotency)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ platform: linux
+ pull: false
+ source: build
+ register: platform_2
+ ignore_errors: true
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: true
+
+- assert:
+ that:
+ - platform_1 is changed
+ - platform_2 is not failed and platform_2 is not changed
+
+####################################################################
+## force ###########################################################
+####################################################################
+
+- name: Build an image
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ pull: false
+ source: build
+
+- name: force (changed)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ dockerfile: "MyDockerfile"
+ pull: false
+ source: build
+ force_source: true
+ register: force_1
+
+- name: force (unchanged)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ dockerfile: "MyDockerfile"
+ pull: false
+ source: build
+ force_source: true
+ register: force_2
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: true
+
+- assert:
+ that:
+ - force_1 is changed
+ - force_2 is not changed
+
+####################################################################
+## load path #######################################################
+####################################################################
+
+- name: Archive image
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ archive_path: "{{ remote_tmp_dir }}/image.tar"
+ source: pull
+ register: archive_image
+
+- assert:
+ that:
+ - archive_image is changed
+
+- name: Copy archive because we will mutate it but other tests need the original
+ copy:
+ remote_src: true
+ src: "{{ remote_tmp_dir }}/image.tar"
+ dest: "{{ remote_tmp_dir }}/image_mutated.tar"
+
+- name: Archive image again (idempotent)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ archive_path: "{{ remote_tmp_dir }}/image_mutated.tar"
+ source: local
+ register: archive_image_2
+
+- assert:
+ that:
+ - archive_image_2 is not changed
+
+- name: Archive image 3rd time, should overwrite due to different id
+ docker_image:
+ name: "{{ docker_test_image_alpine_different }}"
+ archive_path: "{{ remote_tmp_dir }}/image_mutated.tar"
+ source: pull
+ register: archive_image_3
+
+- assert:
+ that:
+ - archive_image_3 is changed
+
+- name: Reset archive
+ copy:
+ remote_src: true
+ src: "{{ remote_tmp_dir }}/image.tar"
+ dest: "{{ remote_tmp_dir }}/image_mutated.tar"
+
+- name: Tag image with different name
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ repository: "{{ hello_world_alt }}"
+ source: local
+
+- name: Archive image 4th time, should overwrite due to different name even when ID is same
+ docker_image:
+ name: "{{ hello_world_alt }}"
+ # Tagged as docker_test_image_hello_world but has same hash/id (before this task overwrites it)
+ archive_path: "{{ remote_tmp_dir }}/image_mutated.tar"
+ source: local
+ register: archive_image_4
+
+- assert:
+ that:
+ - archive_image_4 is changed
+
+# This is the test that needs the original, non-mutated archive
+- name: Archive image by ID
+ docker_image:
+ name: "{{ archive_image.image.Id }}"
+ archive_path: "{{ remote_tmp_dir }}/image_id.tar"
+ source: local
+ register: archive_image_id
+
+- name: Create invalid archive
+ copy:
+ dest: "{{ remote_tmp_dir }}/image-invalid.tar"
+ content: "this is not a valid image"
+
+- name: remove image
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ state: absent
+ force_absent: true
+
+- name: load image (changed)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ load_path: "{{ remote_tmp_dir }}/image.tar"
+ source: load
+ register: load_image
+
+- name: load image (idempotency)
+ docker_image:
+ name: "{{ docker_test_image_hello_world }}"
+ load_path: "{{ remote_tmp_dir }}/image.tar"
+ source: load
+ register: load_image_1
+
+- name: load image (wrong name)
+ docker_image:
+ name: foo:bar
+ load_path: "{{ remote_tmp_dir }}/image.tar"
+ source: load
+ register: load_image_2
+ ignore_errors: true
+
+- name: load image (invalid image)
+ docker_image:
+ name: foo:bar
+ load_path: "{{ remote_tmp_dir }}/image-invalid.tar"
+ source: load
+ register: load_image_3
+ ignore_errors: true
+
+- name: load image (ID, idempotency)
+ docker_image:
+ name: "{{ archive_image.image.Id }}"
+ load_path: "{{ remote_tmp_dir }}/image_id.tar"
+ source: load
+ register: load_image_4
+
+- assert:
+ that:
+ - load_image is changed
+ - archive_image['image']['Id'] == load_image['image']['Id']
+ - load_image_1 is not changed
+ - load_image_2 is failed
+ - >-
+ "The archive did not contain image 'foo:bar'. Instead, found '" ~ docker_test_image_hello_world ~ "'." == load_image_2.msg
+ - load_image_3 is failed
+ - '"Detected no loaded images. Archive potentially corrupt?" == load_image_3.msg'
+ - load_image_4 is not changed
+
+####################################################################
+## build.path ######################################################
+####################################################################
+
+- name: Build image
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ pull: false
+ source: build
+ register: path_1
+
+- name: Build image (idempotency)
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ pull: false
+ source: build
+ register: path_2
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: true
+
+- assert:
+ that:
+ - path_1 is changed
+ - path_2 is not changed
+
+####################################################################
+## build.target ####################################################
+####################################################################
+
+- name: Build multi-stage image
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ dockerfile: "StagedDockerfile"
+ target: first
+ pull: false
+ source: build
+ register: dockerfile_2
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: true
+
+- assert:
+ that:
+ - dockerfile_2 is changed
+ - dockerfile_2.image.Config.WorkingDir == '/first'
+
+####################################################################
+## build.etc_hosts #################################################
+####################################################################
+
+- name: Build image with custom etc_hosts
+ docker_image:
+ name: "{{ iname }}"
+ build:
+ path: "{{ remote_tmp_dir }}/files"
+ dockerfile: "EtcHostsDockerfile"
+ pull: false
+ etc_hosts:
+ some-custom-host: "127.0.0.1"
+ source: build
+ register: path_1
+
+- name: cleanup
+ docker_image:
+ name: "{{ iname }}"
+ state: absent
+ force_absent: true
+
+- assert:
+ that:
+ - path_1 is changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/ArgsDockerfile b/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/ArgsDockerfile
new file mode 100644
index 00000000..dedd88a8
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/ArgsDockerfile
@@ -0,0 +1,13 @@
+# Copyright (c) 2022, Felix Fontein
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+ARG IMAGE
+ARG TEST1
+ARG TEST2
+ARG TEST3
+
+FROM ${IMAGE}
+ENV foo /bar
+WORKDIR ${foo}
+RUN echo "${TEST1} - ${TEST2} - ${TEST3}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/Dockerfile b/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/Dockerfile
new file mode 100644
index 00000000..286094b9
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/Dockerfile
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+FROM {{ docker_test_image_busybox }}
+ENV foo /bar
+WORKDIR ${foo}
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/EtcHostsDockerfile b/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/EtcHostsDockerfile
new file mode 100644
index 00000000..bc21b966
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/EtcHostsDockerfile
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+FROM {{ docker_test_image_busybox }}
+# This should fail building if docker cannot resolve some-custom-host
+RUN ping -c1 some-custom-host
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/MyDockerfile b/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/MyDockerfile
new file mode 100644
index 00000000..24b1c926
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/MyDockerfile
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+FROM {{ docker_test_image_alpine }}
+ENV INSTALL_PATH /newdata
+RUN mkdir -p $INSTALL_PATH
+
+WORKDIR $INSTALL_PATH
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/StagedDockerfile b/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/StagedDockerfile
new file mode 100644
index 00000000..da225342
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image/templates/StagedDockerfile
@@ -0,0 +1,11 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+FROM {{ docker_test_image_busybox }} AS first
+ENV dir /first
+WORKDIR ${dir}
+
+FROM {{ docker_test_image_busybox }} AS second
+ENV dir /second
+WORKDIR ${dir}
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image_info/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_image_info/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image_info/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image_info/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image_info/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image_info/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image_info/tasks/main.yml
new file mode 100644
index 00000000..5bd053ac
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image_info/tasks/main.yml
@@ -0,0 +1,63 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Make sure image is not there
+ docker_image:
+ name: "{{ docker_test_image_alpine_different }}"
+ state: absent
+
+ - name: Inspect a non-available image
+ docker_image_info:
+ name: "{{ docker_test_image_alpine_different }}"
+ register: result
+
+ - assert:
+ that:
+ - "result.images|length == 0"
+
+ - name: Make sure images are there
+ docker_image:
+ name: "{{ item }}"
+ source: pull
+ state: present
+ loop:
+ - "{{ docker_test_image_hello_world }}"
+ - "{{ docker_test_image_alpine }}"
+
+ - name: Inspect an available image
+ docker_image_info:
+ name: "{{ docker_test_image_hello_world }}"
+ register: result
+
+ - assert:
+ that:
+ - "result.images|length == 1"
+ - "docker_test_image_hello_world in result.images[0].RepoTags"
+
+ - name: Inspect multiple images
+ docker_image_info:
+ name:
+ - "{{ docker_test_image_hello_world }}"
+ - "{{ docker_test_image_alpine }}"
+ register: result
+
+ - debug: var=result
+
+ - assert:
+ that:
+ - "result.images|length == 2"
+ - "docker_test_image_hello_world in result.images[0].RepoTags"
+ - "docker_test_image_alpine in result.images[1].RepoTags"
+
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_image_info tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image_load/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image_load/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/meta/main.yml
new file mode 100644
index 00000000..2650229d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/main.yml
new file mode 100644
index 00000000..88b23cfe
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ include_tasks:
+ file: test.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/run-test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/run-test.yml
new file mode 100644
index 00000000..65853ddd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/run-test.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/test.yml
new file mode 100644
index 00000000..a56c9530
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/test.yml
@@ -0,0 +1,38 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create random name prefix
+ set_fact:
+ name_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+- name: Create image and container list
+ set_fact:
+ inames: []
+ cnames: []
+
+- debug:
+ msg: "Using name prefix {{ name_prefix }}"
+
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure all images are removed"
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ inames }}"
+ - name: "Make sure all containers are removed"
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: true
+ with_items: "{{ cnames }}"
+
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_image_load tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/tests/basic.yml b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/tests/basic.yml
new file mode 100644
index 00000000..8d9de994
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_image_load/tasks/tests/basic.yml
@@ -0,0 +1,217 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- set_fact:
+ image_names:
+ - "{{ docker_test_image_hello_world }}"
+ - "{{ docker_test_image_alpine_different }}"
+ - "{{ docker_test_image_alpine }}"
+
+- name: Make sure images are there
+ docker_image:
+ name: "{{ item }}"
+ source: pull
+ register: images
+ loop: "{{ image_names }}"
+
+- name: Compile list of all image names and IDs
+ set_fact:
+ image_ids: "{{ images.results | map(attribute='image') | map(attribute='Id') | list }}"
+ all_images: "{{ image_names + (images.results | map(attribute='image') | map(attribute='Id') | list) }}"
+
+- name: Create archives
+ command: docker save {{ item.images | join(' ') }} -o {{ remote_tmp_dir }}/{{ item.file }}
+ loop:
+ - file: archive-1.tar
+ images: "{{ image_names }}"
+ - file: archive-2.tar
+ images: "{{ image_ids }}"
+ - file: archive-3.tar
+ images:
+ - "{{ image_names[0] }}"
+ - "{{ image_ids[1] }}"
+ - file: archive-4.tar
+ images:
+ - "{{ image_ids[0] }}"
+ - "{{ image_names[0] }}"
+ - file: archive-5.tar
+ images:
+ - "{{ image_ids[0] }}"
+
+# All images by IDs
+
+- name: Remove all images
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ force_absent: true
+ loop: "{{ all_images }}"
+ ignore_errors: true
+ register: remove_all_images
+
+- name: Prune all containers (if removing failed)
+ docker_prune:
+ containers: true
+ when: remove_all_images is failed
+
+- name: Obtain all docker containers and images (if removing failed)
+ shell: docker ps -a ; docker images -a
+ when: remove_all_images is failed
+ register: docker_container_image_list
+
+- name: Show all docker containers and images (if removing failed)
+ debug:
+ var: docker_container_image_list.stdout_lines
+ when: remove_all_images is failed
+
+- name: Remove all images (after pruning)
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ force_absent: true
+ loop: "{{ all_images }}"
+ when: remove_all_images is failed
+
+- name: Load all images (IDs)
+ docker_image_load:
+ path: "{{ remote_tmp_dir }}/archive-2.tar"
+ register: result
+
+- name: Print loaded image names
+ debug:
+ var: result.image_names
+
+- assert:
+ that:
+ - result is changed
+ - result.image_names | sort == image_ids | sort
+ - result.image_names | length == result.images | length
+
+- name: Load all images (IDs, should be same result)
+ docker_image_load:
+ path: "{{ remote_tmp_dir }}/archive-2.tar"
+ register: result_2
+
+- name: Print loaded image names
+ debug:
+ var: result_2.image_names
+
+- assert:
+ that:
+ - result_2 is changed
+ - result_2.image_names | sort == image_ids | sort
+ - result_2.image_names | length == result_2.images | length
+
+# Mixed images and IDs
+
+- name: Remove all images
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ all_images }}"
+
+- name: Load all images (mixed images and IDs)
+ docker_image_load:
+ path: "{{ remote_tmp_dir }}/archive-3.tar"
+ register: result
+
+- name: Print loading log
+ debug:
+ var: result.stdout_lines
+
+- name: Print loaded image names
+ debug:
+ var: result.image_names
+
+- assert:
+ that:
+ - result is changed
+ # For some reason, *sometimes* only the named image is found; in fact, in that case, the log only mentions that image and nothing else
+ - "result.images | length == 3 or ('Loaded image: ' ~ docker_test_image_hello_world) == result.stdout"
+ - (result.image_names | sort) in [[image_names[0], image_ids[0], image_ids[1]] | sort, [image_names[0]]]
+ - result.images | length in [1, 3]
+ - (result.images | map(attribute='Id') | sort) in [[image_ids[0], image_ids[0], image_ids[1]] | sort, [image_ids[0]]]
+
+# Same image twice
+
+- name: Remove all images
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ all_images }}"
+
+- name: Load all images (same image twice)
+ docker_image_load:
+ path: "{{ remote_tmp_dir }}/archive-4.tar"
+ register: result
+
+- name: Print loaded image names
+ debug:
+ var: result.image_names
+
+- assert:
+ that:
+ - result is changed
+ - result.image_names | length == 1
+ - result.image_names[0] == image_names[0]
+ - result.images | length == 1
+ - result.images[0].Id == image_ids[0]
+
+# Single image by ID
+
+- name: Remove all images
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ all_images }}"
+
+- name: Load all images (single image by ID)
+ docker_image_load:
+ path: "{{ remote_tmp_dir }}/archive-5.tar"
+ register: result
+
+- name: Print loaded image names
+ debug:
+ var: result.image_names
+
+- assert:
+ that:
+ - result is changed
+ - result.image_names | length == 1
+ - result.image_names[0] == image_ids[0]
+ - result.images | length == 1
+ - result.images[0].Id == image_ids[0]
+
+- name: Try to get image info by name
+ docker_image_info:
+ name: "{{ image_names[0] }}"
+ register: result
+
+- name: Make sure that image does not exist by name
+ assert:
+ that:
+ - result.images | length == 0
+
+# All images by names
+
+- name: Remove all images
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ all_images }}"
+
+- name: Load all images (names)
+ docker_image_load:
+ path: "{{ remote_tmp_dir }}/archive-1.tar"
+ register: result
+
+- name: Print loaded image names
+ debug:
+ var: result.image_names
+
+- assert:
+ that:
+ - result.image_names | sort == image_names | sort
+ - result.image_names | length == result.images | length
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_login/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_login/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_login/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_login/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_login/meta/main.yml
new file mode 100644
index 00000000..3133a036
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_login/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker_registry
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/main.yml
new file mode 100644
index 00000000..88b23cfe
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ include_tasks:
+ file: test.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/run-test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/run-test.yml
new file mode 100644
index 00000000..65853ddd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/run-test.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/test.yml
new file mode 100644
index 00000000..bd99acc0
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/test.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_login tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/docker_login.yml b/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/docker_login.yml
new file mode 100644
index 00000000..efb3efc1
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/docker_login.yml
@@ -0,0 +1,150 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Log out server
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: absent
+
+ - name: Log in with wrong password (check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: "1234"
+ state: present
+ register: login_failed_check
+ ignore_errors: true
+ check_mode: true
+
+ - name: Log in with wrong password
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: "1234"
+ state: present
+ register: login_failed
+ ignore_errors: true
+
+ - name: Make sure that login failed
+ assert:
+ that:
+ - login_failed_check is failed
+ - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed_check.msg"
+ - login_failed is failed
+ - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed.msg"
+
+ - name: Log in (check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_1
+ check_mode: true
+
+ - name: Log in
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_2
+
+ - name: Get permissions of ~/.docker/config.json
+ stat:
+ path: ~/.docker/config.json
+ register: login_2_stat
+
+ - name: Log in (idempotent)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_3
+
+ - name: Log in (idempotent, check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_4
+ check_mode: true
+
+ - name: Make sure that login worked
+ assert:
+ that:
+ - login_1 is changed
+ - login_2 is changed
+ - login_3 is not changed
+ - login_4 is not changed
+ - login_2_stat.stat.mode == '0600'
+
+ - name: Log in again with wrong password (check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: "1234"
+ state: present
+ register: login_failed_check
+ ignore_errors: true
+ check_mode: true
+
+ - name: Log in again with wrong password
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: "1234"
+ state: present
+ register: login_failed
+ ignore_errors: true
+
+ - name: Make sure that login failed again
+ assert:
+ that:
+ - login_failed_check is failed
+ - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed_check.msg"
+ - login_failed is failed
+ - "('login attempt to http://' ~ registry_frontend_address ~ '/v2/ failed') in login_failed.msg"
+
+ - name: Log out (check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ state: absent
+ register: logout_1
+ check_mode: true
+
+ - name: Log out
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ state: absent
+ register: logout_2
+
+ - name: Log out (idempotent)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ state: absent
+ register: logout_3
+
+ - name: Log out (idempotent, check mode)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ state: absent
+ register: logout_4
+ check_mode: true
+
+  - name: Make sure that logout worked
+ assert:
+ that:
+ - logout_1 is changed
+ - logout_2 is changed
+ - logout_3 is not changed
+ - logout_4 is not changed
+
+ when: registry_frontend_address != 'n/a'
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/multiple-servers.yml b/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/multiple-servers.yml
new file mode 100644
index 00000000..7ffd0978
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_login/tasks/tests/multiple-servers.yml
@@ -0,0 +1,61 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Log out server 1
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: absent
+
+ - name: Log out server 2
+ docker_login:
+ registry_url: "{{ registry_frontend2_address }}"
+ username: testuser
+ password: hunter2
+ state: absent
+
+ - name: Log in server 1
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_1
+
+ - name: Log in server 2
+ docker_login:
+ registry_url: "{{ registry_frontend2_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_2
+
+ - name: Log in server 1 (idempotent)
+ docker_login:
+ registry_url: "{{ registry_frontend_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_1_2
+
+ - name: Log in server 2 (idempotent)
+ docker_login:
+ registry_url: "{{ registry_frontend2_address }}"
+ username: testuser
+ password: hunter2
+ state: present
+ register: login_2_2
+
+ - name: Make sure that login worked
+ assert:
+ that:
+ - login_1 is changed
+ - login_2 is changed
+ - login_1_2 is not changed
+ - login_2_2 is not changed
+
+ when: registry_frontend_address != 'n/a' and registry_frontend2_address != 'n/a'
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_network/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_network/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/main.yml
new file mode 100644
index 00000000..4a056151
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/main.yml
@@ -0,0 +1,52 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: List inspection results for all docker networks
+ docker_host_info:
+ networks: true
+ verbose_output: true
+ register: all_networks
+
+- name: Show inspection results for all docker networks
+ debug:
+ var: all_networks.networks
+
+- name: Create random name prefix
+ set_fact:
+ name_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+ cnames: []
+ dnetworks: []
+
+- debug:
+ msg: "Using name prefix {{ name_prefix }}"
+
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure all containers are removed"
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: true
+ loop: "{{ cnames }}"
+ - name: "Make sure all networks are removed"
+ docker_network:
+ name: "{{ item }}"
+ state: absent
+ force: true
+ loop: "{{ dnetworks }}"
+
+ when: docker_api_version is version('1.25', '>=') # FIXME: find out API version!
+
+- fail: msg="Too old docker / docker-py version to run docker_network tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/run-test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/run-test.yml
new file mode 100644
index 00000000..65853ddd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/run-test.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/basic.yml b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/basic.yml
new file mode 100644
index 00000000..1a419c73
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/basic.yml
@@ -0,0 +1,138 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container and network names
+ set_fact:
+ cname_1: "{{ name_prefix ~ '-container-1' }}"
+ cname_2: "{{ name_prefix ~ '-container-2' }}"
+ cname_3: "{{ name_prefix ~ '-container-3' }}"
+ nname_1: "{{ name_prefix ~ '-network-1' }}"
+ nname_2: "{{ name_prefix ~ '-network-2' }}"
+- name: Registering container and network names
+ set_fact:
+ cnames: "{{ cnames + [cname_1, cname_2, cname_3] }}"
+ dnetworks: "{{ dnetworks + [nname_1, nname_2] }}"
+
+- name: Create containers
+ docker_container:
+ name: "{{ container_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: /bin/sleep 10m
+ state: started
+ loop:
+ - "{{ cname_1 }}"
+ - "{{ cname_2 }}"
+ - "{{ cname_3 }}"
+ loop_control:
+ loop_var: container_name
+
+####################################################################
+
+- name: Create network
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ register: networks_1
+
+- name: Connect network to containers 1
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_1 }}"
+ register: networks_2
+
+- name: Connect network to containers 1 (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_1 }}"
+ register: networks_2_idem
+
+- name: Connect network to containers 1 and 2
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_1 }}"
+ - "{{ cname_2 }}"
+ register: networks_3
+
+- name: Connect network to containers 1 and 2 (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_1 }}"
+ - "{{ cname_2 }}"
+ register: networks_3_idem
+
+- name: Connect network to container 3
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_3 }}"
+ appends: true
+ register: networks_4
+
+- name: Connect network to container 3 (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_3 }}"
+ appends: true
+ register: networks_4_idem
+
+- name: Disconnect network from container 1
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_2 }}"
+ - "{{ cname_3 }}"
+ register: networks_5
+
+- name: Disconnect network from container 1 (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ connected:
+ - "{{ cname_2 }}"
+ - "{{ cname_3 }}"
+ register: networks_5_idem
+
+- name: Cleanup
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+
+- assert:
+ that:
+ - networks_1 is changed
+ - networks_2 is changed
+ - networks_2_idem is not changed
+ - networks_3 is changed
+ - networks_3_idem is not changed
+ - networks_4 is changed
+ - networks_4_idem is not changed
+ - networks_5 is changed
+ - networks_5_idem is not changed
+
+####################################################################
+
+- name: Delete containers
+ docker_container:
+ name: "{{ container_name }}"
+ state: absent
+ force_kill: true
+ loop:
+ - "{{ cname_1 }}"
+ - "{{ cname_2 }}"
+ - "{{ cname_3 }}"
+ loop_control:
+ loop_var: container_name
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/ipam.yml b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/ipam.yml
new file mode 100644
index 00000000..7091e95f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/ipam.yml
@@ -0,0 +1,309 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering network names
+ set_fact:
+ nname_ipam_0: "{{ name_prefix ~ '-network-ipam-0' }}"
+ nname_ipam_1: "{{ name_prefix ~ '-network-ipam-1' }}"
+ nname_ipam_2: "{{ name_prefix ~ '-network-ipam-2' }}"
+ nname_ipam_3: "{{ name_prefix ~ '-network-ipam-3' }}"
+
+- name: Registering network names
+ set_fact:
+ dnetworks: "{{ dnetworks + [nname_ipam_0, nname_ipam_1, nname_ipam_2, nname_ipam_3] }}"
+
+
+#################### IPv4 IPAM config ####################
+
+- name: Create network with custom IPAM config
+ docker_network:
+ name: "{{ nname_ipam_1 }}"
+ ipam_config:
+ - subnet: 10.25.120.0/24
+ gateway: 10.25.120.2
+ iprange: 10.25.120.0/26
+ aux_addresses:
+ host1: 10.25.120.3
+ host2: 10.25.120.4
+ register: network
+
+- assert:
+ that:
+ - network is changed
+
+- name: Create network with custom IPAM config (idempotence)
+ docker_network:
+ name: "{{ nname_ipam_1 }}"
+ ipam_config:
+ - subnet: 10.25.120.0/24
+ gateway: 10.25.120.2
+ iprange: 10.25.120.0/26
+ aux_addresses:
+ host1: 10.25.120.3
+ host2: 10.25.120.4
+ register: network
+
+- assert:
+ that:
+ - network is not changed
+
+- name: Change of network created with custom IPAM config
+ docker_network:
+ name: "{{ nname_ipam_1 }}"
+ ipam_config:
+ - subnet: 10.25.121.0/24
+ gateway: 10.25.121.2
+ iprange: 10.25.121.0/26
+ aux_addresses:
+ host1: 10.25.121.3
+ register: network
+ diff: true
+
+- assert:
+ that:
+ - network is changed
+ - network.diff.differences | length == 4
+ - '"ipam_config[0].subnet" in network.diff.differences'
+ - '"ipam_config[0].gateway" in network.diff.differences'
+ - '"ipam_config[0].iprange" in network.diff.differences'
+ - '"ipam_config[0].aux_addresses" in network.diff.differences'
+
+- name: Remove gateway and iprange of network with custom IPAM config
+ docker_network:
+ name: "{{ nname_ipam_1 }}"
+ ipam_config:
+ - subnet: 10.25.121.0/24
+ register: network
+
+- assert:
+ that:
+ - network is not changed
+
+- name: Cleanup network with custom IPAM config
+ docker_network:
+ name: "{{ nname_ipam_1 }}"
+ state: absent
+
+
+#################### IPv6 IPAM config ####################
+
+- name: Create network with IPv6 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_2 }}"
+ enable_ipv6: true
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce0::/64
+ register: network
+
+- assert:
+ that:
+ - network is changed
+
+- name: Create network with IPv6 IPAM config (idempotence)
+ docker_network:
+ name: "{{ nname_ipam_2 }}"
+ enable_ipv6: true
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce0::/64
+ register: network
+
+- assert:
+ that:
+ - network is not changed
+
+- name: Change subnet of network with IPv6 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_2 }}"
+ enable_ipv6: true
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce1::/64
+ register: network
+ diff: true
+
+- assert:
+ that:
+ - network is changed
+ - network.diff.differences | length == 1
+ - network.diff.differences[0] == "ipam_config[0].subnet"
+
+- name: Change subnet of network with IPv6 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_2 }}"
+ enable_ipv6: true
+ ipam_config:
+ - subnet: "fdd1:ac8c:0557:7ce1::"
+ register: network
+ ignore_errors: true
+
+- assert:
+ that:
+ - network is failed
+ - "network.msg == '\"fdd1:ac8c:0557:7ce1::\" is not a valid CIDR'"
+
+- name: Cleanup network with IPv6 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_2 }}"
+ state: absent
+
+
+#################### IPv4 and IPv6 network ####################
+
+- name: Create network with IPv6 and custom IPv4 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ enable_ipv6: true
+ ipam_config:
+ - subnet: 10.26.120.0/24
+ - subnet: fdd1:ac8c:0557:7ce2::/64
+ register: network
+
+- assert:
+ that:
+ - network is changed
+
+- name: Change subnet order of network with IPv6 and custom IPv4 IPAM config (idempotence)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ enable_ipv6: true
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce2::/64
+ - subnet: 10.26.120.0/24
+ register: network
+
+- assert:
+ that:
+ - network is not changed
+
+- name: Remove IPv6 from network with custom IPv4 and IPv6 IPAM config (change)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ enable_ipv6: false
+ ipam_config:
+ - subnet: 10.26.120.0/24
+ register: network
+ diff: true
+
+- assert:
+ that:
+ - network is changed
+ - network.diff.differences | length == 1
+ - network.diff.differences[0] == "enable_ipv6"
+
+- name: Cleanup network with IPv6 and custom IPv4 IPAM config
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ state: absent
+
+
+#################### multiple IPv4 networks ####################
+
+- block:
+ - name: Create network with two IPv4 IPAM configs
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ driver: "macvlan"
+ driver_options:
+ parent: "{{ ansible_default_ipv4.alias }}"
+ ipam_config:
+ - subnet: 10.26.120.0/24
+ - subnet: 10.26.121.0/24
+ register: network
+
+ - assert:
+ that:
+ - network is changed
+
+ - name: Create network with two IPv4 IPAM configs (idempotence)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ driver: "macvlan"
+ driver_options:
+ parent: "{{ ansible_default_ipv4.alias }}"
+ ipam_config:
+ - subnet: 10.26.121.0/24
+ - subnet: 10.26.120.0/24
+ register: network
+
+ - assert:
+ that:
+ - network is not changed
+
+ - name: Create network with two IPv4 IPAM configs (change)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ driver: "macvlan"
+ driver_options:
+ parent: "{{ ansible_default_ipv4.alias }}"
+ ipam_config:
+ - subnet: 10.26.120.0/24
+ - subnet: 10.26.122.0/24
+ register: network
+ diff: true
+
+ - assert:
+ that:
+ - network is changed
+ - network.diff.differences | length == 1
+
+ - name: Create network with one IPv4 IPAM config (no change)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ driver: "macvlan"
+ driver_options:
+ parent: "{{ ansible_default_ipv4.alias }}"
+ ipam_config:
+ - subnet: 10.26.122.0/24
+ register: network
+
+ - assert:
+ that:
+ - network is not changed
+
+ - name: Cleanup network
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ state: absent
+
+ when: ansible_facts.virtualization_type != 'docker' and ansible_default_ipv4.alias is defined
+
+
+#################### IPAM driver options ####################
+
+- name: Create network with IPAM driver options
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ ipam_driver: default
+ ipam_driver_options:
+ a: b
+ register: network_1
+ ignore_errors: true
+- name: Create network with IPAM driver options (idempotence)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ ipam_driver: default
+ ipam_driver_options:
+ a: b
+ diff: true
+ register: network_2
+ ignore_errors: true
+- name: Create network with IPAM driver options (change)
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ ipam_driver: default
+ ipam_driver_options:
+ a: c
+ diff: true
+ register: network_3
+ ignore_errors: true
+- name: Cleanup network
+ docker_network:
+ name: "{{ nname_ipam_3 }}"
+ state: absent
+
+- assert:
+ that:
+ - network_1 is changed
+ - network_2 is not changed
+ - network_3 is changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/options.yml b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/options.yml
new file mode 100644
index 00000000..63315ca4
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/options.yml
@@ -0,0 +1,234 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering network name
+ set_fact:
+ nname_1: "{{ name_prefix ~ '-network-1' }}"
+- name: Registering network name
+ set_fact:
+ dnetworks: "{{ dnetworks + [nname_1] }}"
+
+####################################################################
+## internal ########################################################
+####################################################################
+
+- name: internal
+ docker_network:
+ name: "{{ nname_1 }}"
+ internal: true
+ register: internal_1
+
+- name: internal (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ internal: true
+ register: internal_2
+
+- name: internal (change)
+ docker_network:
+ name: "{{ nname_1 }}"
+ internal: false
+ register: internal_3
+
+- name: cleanup
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: true
+
+- assert:
+ that:
+ - internal_1 is changed
+ - internal_2 is not changed
+ - internal_3 is changed
+
+####################################################################
+## driver_options ##################################################
+####################################################################
+
+- name: driver_options
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver_options:
+ com.docker.network.bridge.enable_icc: 'false'
+ register: driver_options_1
+
+- name: driver_options (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver_options:
+ com.docker.network.bridge.enable_icc: 'false'
+ register: driver_options_2
+
+- name: driver_options (idempotency with string translation)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver_options:
+ com.docker.network.bridge.enable_icc: False
+ register: driver_options_3
+
+- name: driver_options (change)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver_options:
+ com.docker.network.bridge.enable_icc: 'true'
+ register: driver_options_4
+
+- name: driver_options (idempotency with string translation)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver_options:
+ com.docker.network.bridge.enable_icc: True
+ register: driver_options_5
+
+- name: cleanup
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: true
+
+- assert:
+ that:
+ - driver_options_1 is changed
+ - driver_options_2 is not changed
+ - driver_options_3 is not changed
+ - driver_options_4 is changed
+ - driver_options_5 is not changed
+
+####################################################################
+## scope ###########################################################
+####################################################################
+
+- block:
+ - name: scope
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: bridge
+ scope: local
+ register: scope_1
+
+ - name: scope (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: bridge
+ scope: local
+ register: scope_2
+
+ - name: swarm
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
+
+ # Driver change alongside scope is intentional - bridge doesn't appear to support anything but local, and overlay can't downgrade to local. Additionally, overlay reports as swarm for swarm OR global, so no change is reported in that case.
+ # Test output indicates that the scope is altered, at least, so manual inspection will be required to verify this going forward, unless we come up with a test driver that supports multiple scopes.
+ - name: scope (change)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: overlay
+ scope: swarm
+ register: scope_3
+
+ - name: cleanup network
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: true
+
+ - assert:
+ that:
+ - scope_1 is changed
+ - scope_2 is not changed
+ - scope_3 is changed
+
+ always:
+ - name: cleanup swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+####################################################################
+## attachable ######################################################
+####################################################################
+
+- name: attachable
+ docker_network:
+ name: "{{ nname_1 }}"
+ attachable: true
+ register: attachable_1
+ ignore_errors: true
+
+- name: attachable (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ attachable: true
+ register: attachable_2
+ ignore_errors: true
+
+- name: attachable (change)
+ docker_network:
+ name: "{{ nname_1 }}"
+ attachable: false
+ register: attachable_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: true
+
+- assert:
+ that:
+ - attachable_1 is changed
+ - attachable_2 is not changed
+ - attachable_3 is changed
+
+####################################################################
+## labels ##########################################################
+####################################################################
+
+- name: labels
+ docker_network:
+ name: "{{ nname_1 }}"
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ register: labels_1
+
+- name: labels (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ labels:
+ ansible.test.2: world
+ ansible.test.1: hello
+ register: labels_2
+
+- name: labels (less labels)
+ docker_network:
+ name: "{{ nname_1 }}"
+ labels:
+ ansible.test.1: hello
+ register: labels_3
+
+- name: labels (more labels)
+ docker_network:
+ name: "{{ nname_1 }}"
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ register: labels_4
+
+- name: cleanup
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: true
+
+- assert:
+ that:
+ - labels_1 is changed
+ - labels_2 is not changed
+ - labels_3 is not changed
+ - labels_4 is changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/overlay.yml b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/overlay.yml
new file mode 100644
index 00000000..59d79cc0
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/overlay.yml
@@ -0,0 +1,62 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering network name
+ set_fact:
+ nname_1: "{{ name_prefix ~ '-network-1' }}"
+- name: Registering network name
+ set_fact:
+ dnetworks: "{{ dnetworks + [nname_1] }}"
+
+####################################################################
+## overlay #########################################################
+####################################################################
+
+- block:
+ # Overlay networks require swarm initialization before they'll work
+ - name: swarm
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
+
+ - name: overlay
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: overlay
+ driver_options:
+ com.docker.network.driver.overlay.vxlanid_list: "257"
+ register: overlay_1
+
+ - name: overlay (idempotency)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: overlay
+ driver_options:
+ com.docker.network.driver.overlay.vxlanid_list: "257"
+ register: overlay_2
+
+ - name: overlay (change)
+ docker_network:
+ name: "{{ nname_1 }}"
+ driver: bridge
+ register: overlay_3
+
+ - name: cleanup network
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: absent
+ force: true
+
+ - assert:
+ that:
+ - overlay_1 is changed
+ - overlay_2 is not changed
+ - overlay_3 is changed
+
+ always:
+ - name: cleanup swarm
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/substring.yml b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/substring.yml
new file mode 100644
index 00000000..b4b37b27
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network/tasks/tests/substring.yml
@@ -0,0 +1,41 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container and network names
+ set_fact:
+ nname_1: "{{ name_prefix ~ '-network-foo' }}"
+ nname_2: "{{ name_prefix ~ '-network-foobar' }}"
+- name: Registering container and network names
+ set_fact:
+ dnetworks: "{{ dnetworks + [nname_1, nname_2] }}"
+
+####################################################################
+
+- name: Create network (superstring)
+ docker_network:
+ name: "{{ nname_2 }}"
+ state: present
+ register: networks_1
+
+- name: Create network (substring)
+ docker_network:
+ name: "{{ nname_1 }}"
+ state: present
+ register: networks_2
+
+- name: Cleanup
+ docker_network:
+ name: "{{ network_name }}"
+ state: absent
+ loop:
+ - "{{ nname_1 }}"
+ - "{{ nname_2 }}"
+ loop_control:
+ loop_var: network_name
+
+- assert:
+ that:
+ - networks_1 is changed
+ - networks_2 is changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network_info/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_network_info/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network_info/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_network_info/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network_info/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_network_info/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_network_info/tasks/main.yml
new file mode 100644
index 00000000..910b4ec8
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_network_info/tasks/main.yml
@@ -0,0 +1,80 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Create random network name
+ set_fact:
+ nname: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+
+ - name: Make sure network is not there
+ docker_network:
+ name: "{{ nname }}"
+ state: absent
+ force: true
+
+ - name: Inspect a non-present network
+ docker_network_info:
+ name: "{{ nname }}"
+ register: result
+
+ - assert:
+ that:
+ - "not result.exists"
+ - "'network' in result"
+ - "result.network is none"
+
+ - name: Make sure network exists
+ docker_network:
+ name: "{{ nname }}"
+ state: present
+
+ - name: Inspect a present network
+ docker_network_info:
+ name: "{{ nname }}"
+ register: result
+ - name: Dump docker_network_info result
+ debug: var=result
+
+ - name: "Comparison: use 'docker network inspect'"
+ command: docker network inspect "{{ nname }}"
+ register: docker_inspect
+ ignore_errors: true
+ - block:
+ - set_fact:
+ docker_inspect_result: "{{ docker_inspect.stdout | from_json }}"
+ - name: Dump docker inspect result
+ debug: var=docker_inspect_result
+ when: docker_inspect is not failed
+
+ - name: Cleanup
+ docker_network:
+ name: "{{ nname }}"
+ state: absent
+ force: true
+
+ - assert:
+ that:
+ - result.exists
+ - "'network' in result"
+ - "result.network"
+
+ - assert:
+ that:
+ - "result.network == docker_inspect_result[0]"
+ when: docker_inspect is not failed
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in docker_inspect.stderr"
+ when: docker_inspect is failed
+
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_network_info tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_node/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_node/aliases
new file mode 100644
index 00000000..50e0e5f3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_node/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/2
+destructive
+needs/root
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_node/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_node/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_node/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/main.yml
new file mode 100644
index 00000000..68bb5426
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/main.yml
@@ -0,0 +1,41 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+# Run the tests
+- block:
+ - include_tasks: test_node.yml
+
+ always:
+ - name: Cleanup (trying)
+ docker_swarm:
+ state: absent
+ force: true
+ diff: false
+ ignore_errors: true
+
+ - name: Restart docker daemon
+ service:
+ name: docker
+ state: restarted
+ become: true
+ - name: Wait for docker daemon to be fully restarted
+ command: docker ps
+ ignore_errors: true
+
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
+ diff: false
+
+ when: docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_node tests!"
+ when: not(docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/test_node.yml b/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/test_node.yml
new file mode 100644
index 00000000..89c9b355
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_node/tasks/test_node.yml
@@ -0,0 +1,844 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Try to get docker_node_info when docker is not running in swarm mode
+ docker_node_info:
+ ignore_errors: true
+ register: output
+
+ - name: assert failure when called when swarm is not in use or not run on manager node
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "Error running docker swarm module: must run on swarm manager node"'
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ register: output
+
+ - name: assert changed when create a new swarm cluster
+ assert:
+ that:
+ - 'output is changed'
+ - 'output.actions[0] | regex_search("New Swarm cluster created: ")'
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+
+ - name: Try to get docker_node_info when docker is running in swarm mode and as manager
+ docker_node_info:
+ register: output
+
+ - name: assert reading docker swarm node facts
+ assert:
+ that:
+ - 'output.nodes | length > 0'
+ - 'output.nodes[0].ID is string'
+
+ - name: Register node ID
+ set_fact:
+ nodeid: "{{ output.nodes[0].ID }}"
+
+####################################################################
+## Set node as swarm manager #######################################
+####################################################################
+
+ - name: Try to set node as manager (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: manager
+ check_mode: true
+ register: set_as_manager_1
+
+ - name: Try to set node as manager
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: manager
+ register: set_as_manager_2
+
+ - name: Try to set node as manager (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: manager
+ register: set_as_manager_3
+
+ - name: Try to set node as manager (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: manager
+ check_mode: true
+ register: set_as_manager_4
+
  - name: assert that node role has not changed
+ assert:
+ that:
+ - 'set_as_manager_1 is not changed'
+ - 'set_as_manager_2 is not changed'
+ - 'set_as_manager_3 is not changed'
+ - 'set_as_manager_4 is not changed'
+ - 'set_as_manager_1.node.Spec.Role == "manager"'
+ - 'set_as_manager_2.node.Spec.Role == "manager"'
+ - 'set_as_manager_3.node.Spec.Role == "manager"'
+ - 'set_as_manager_4.node.Spec.Role == "manager"'
+
+####################################################################
+## Set node as swarm worker ########################################
+####################################################################
+
+ - name: Try to set node as worker (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: worker
+ check_mode: true
+ register: set_as_worker_1
+
+ - name: Try to set node as worker
+ docker_node:
+ hostname: "{{ nodeid }}"
+ role: worker
+ ignore_errors: true
+ register: set_as_worker_2
+
+ - name: assert that node cannot change role to worker
+ assert:
+ that:
+ - 'set_as_worker_1 is changed'
+ - 'set_as_worker_2 is failed'
+ - 'set_as_worker_2.msg | regex_search("attempting to demote the last manager of the swarm")'
+
+####################################################################
## Set node as paused ##############################################
+####################################################################
+
+ - name: Try to set node availability as paused (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: pause
+ check_mode: true
+ register: set_as_paused_1
+
+ - name: Try to set node availability as paused
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: pause
+ register: set_as_paused_2
+
+ - name: Try to set node availability as paused (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: pause
+ register: set_as_paused_3
+
+ - name: Try to set node availability as paused (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: pause
+ check_mode: true
+ register: set_as_paused_4
+
+ - name: assert node changed availability to paused
+ assert:
+ that:
+ - 'set_as_paused_1 is changed'
+ - 'set_as_paused_2 is changed'
+ - 'set_as_paused_3 is not changed'
+ - 'set_as_paused_4 is not changed'
+ - 'set_as_paused_2.node.Spec.Availability == "pause"'
+
+####################################################################
+## Set node as drained #############################################
+####################################################################
+
+ - name: Try to set node availability as drained (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: drain
+ check_mode: true
+ register: output_drain_1
+
+ - name: Try to set node availability as drained
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: drain
+ register: output_drain_2
+
+ - name: Try to set node availability as drained (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: drain
+ register: output_drain_3
+
+ - name: Try to set node availability as drained (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: drain
+ check_mode: true
+ register: output_drain_4
+
+ - name: assert node changed availability to drained
+ assert:
+ that:
+ - 'output_drain_1 is changed'
+ - 'output_drain_2 is changed'
+ - 'output_drain_3 is not changed'
+ - 'output_drain_4 is not changed'
+ - 'output_drain_2.node.Spec.Availability == "drain"'
+
+
+####################################################################
+## Set node as active ##############################################
+####################################################################
+
+ - name: Try to set node availability as active (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: active
+ check_mode: true
+ register: output_active_1
+
+ - name: Try to set node availability as active
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: active
+ register: output_active_2
+
+ - name: Try to set node availability as active (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: active
+ register: output_active_3
+
+ - name: Try to set node availability as active (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ availability: active
+ check_mode: true
+ register: output_active_4
+
+ - name: assert node changed availability to active
+ assert:
+ that:
+ - 'output_active_1 is changed'
+ - 'output_active_2 is changed'
+ - 'output_active_3 is not changed'
+ - 'output_active_4 is not changed'
+ - 'output_active_2.node.Spec.Availability == "active"'
+
+####################################################################
+## Add single label ###############################################
+####################################################################
+
+ - name: Try to add single label to swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1
+ check_mode: true
+ register: output_add_single_label_1
+
+ - name: Try to add single label to swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1
+ register: output_add_single_label_2
+
+ - name: Try to add single label to swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1
+ register: output_add_single_label_3
+
+ - name: Try to add single label to swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1
+ check_mode: true
+ register: output_add_single_label_4
+
+ - name: assert adding single label to swarm node
+ assert:
+ that:
+ - 'output_add_single_label_1 is changed'
+ - 'output_add_single_label_2 is changed'
+ - 'output_add_single_label_3 is not changed'
+ - 'output_add_single_label_4 is not changed'
+ - 'output_add_single_label_2.node.Spec.Labels | length == 1'
+ - 'output_add_single_label_2.node.Spec.Labels.label1 == "value1"'
+
+####################################################################
+## Add multiple labels #############################################
+####################################################################
+
+ - name: Try to add five labels to swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2
+ label3: value3
+ label4: value4
+ label5: value5
+ label6: value6
+ check_mode: true
+ register: output_add_multiple_labels_1
+
+ - name: Try to add five labels to swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2
+ label3: value3
+ label4: value4
+ label5: value5
+ label6: value6
+ register: output_add_multiple_labels_2
+
+ - name: Try to add five labels to swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2
+ label3: value3
+ label4: value4
+ label5: value5
+ label6: value6
+ register: output_add_multiple_labels_3
+
+ - name: Try to add five labels to swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2
+ label3: value3
+ label4: value4
+ label5: value5
+ label6: value6
+ check_mode: true
+ register: output_add_multiple_labels_4
+
+ - name: assert adding multiple labels to swarm node
+ assert:
+ that:
+ - 'output_add_multiple_labels_1 is changed'
+ - 'output_add_multiple_labels_2 is changed'
+ - 'output_add_multiple_labels_3 is not changed'
+ - 'output_add_multiple_labels_4 is not changed'
+ - 'output_add_multiple_labels_2.node.Spec.Labels | length == 6'
+ - 'output_add_multiple_labels_2.node.Spec.Labels.label1 == "value1"'
+ - 'output_add_multiple_labels_2.node.Spec.Labels.label6 == "value6"'
+
+####################################################################
+## Update label value ##############################################
+####################################################################
+
+ - name: Update value of existing label (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1111
+ check_mode: true
+ register: output_update_label_1
+
+ - name: Update value of existing label
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1111
+ register: output_update_label_2
+
+ - name: Update value of existing label (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1111
+ register: output_update_label_3
+
+ - name: Update value of existing label (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label1: value1111
+ check_mode: true
+ register: output_update_label_4
+
+ - name: assert updating single label assigned to swarm node
+ assert:
+ that:
+ - 'output_update_label_1 is changed'
+ - 'output_update_label_2 is changed'
+ - 'output_update_label_3 is not changed'
+ - 'output_update_label_4 is not changed'
+ - 'output_update_label_2.node.Spec.Labels | length == 6'
+ - 'output_update_label_2.node.Spec.Labels.label1 == "value1111"'
+ - 'output_update_label_2.node.Spec.Labels.label5 == "value5"'
+
+####################################################################
+## Update multiple labels values ###################################
+####################################################################
+
+ - name: Update value of multiple existing label (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2222
+ label3: value3333
+ check_mode: true
+ register: output_update_labels_1
+
+ - name: Update value of multiple existing label
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2222
+ label3: value3333
+ register: output_update_labels_2
+
+ - name: Update value of multiple existing label (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2222
+ label3: value3333
+ register: output_update_labels_3
+
+ - name: Update value of multiple existing label (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label2: value2222
+ label3: value3333
+ check_mode: true
+ register: output_update_labels_4
+
+ - name: assert updating multiple labels assigned to swarm node
+ assert:
+ that:
+ - 'output_update_labels_1 is changed'
+ - 'output_update_labels_2 is changed'
+ - 'output_update_labels_3 is not changed'
+ - 'output_update_labels_4 is not changed'
+ - 'output_update_labels_2.node.Spec.Labels | length == 6'
+ - 'output_update_labels_2.node.Spec.Labels.label1 == "value1111"'
+ - 'output_update_labels_2.node.Spec.Labels.label3 == "value3333"'
+ - 'output_update_labels_2.node.Spec.Labels.label5 == "value5"'
+
+####################################################################
+## Remove single label #############################################
+####################################################################
+
+ - name: Try to remove single existing label from swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label1
+ check_mode: true
+ register: output_remove_label_1
+
+ - name: Try to remove single existing label from swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label1
+ register: output_remove_label_2
+
+ - name: Try to remove single existing label from swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label1
+ register: output_remove_label_3
+
+ - name: Try to remove single existing label from swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label1
+ check_mode: true
+ register: output_remove_label_4
+
+ - name: assert removing single label from swarm node
+ assert:
+ that:
+ - 'output_remove_label_1 is changed'
+ - 'output_remove_label_2 is changed'
+ - 'output_remove_label_3 is not changed'
+ - 'output_remove_label_4 is not changed'
+ - 'output_remove_label_2.node.Spec.Labels | length == 5'
+ - '"label1" not in output_remove_label_2.node.Spec.Labels'
+ - 'output_remove_label_2.node.Spec.Labels.label3 == "value3333"'
+ - 'output_remove_label_2.node.Spec.Labels.label5 == "value5"'
+
+
+####################################################################
+## Remove single not assigned to swarm label #######################
+####################################################################
+
+ - name: Try to remove single non-existing label from swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - labelnotexist
+ check_mode: true
+ register: output_remove_nonexist_label_1
+
+ - name: Try to remove single non-existing label from swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - labelnotexist
+ register: output_remove_nonexist_label_2
+
+ - name: Try to remove single non-existing label from swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - labelnotexist
+ register: output_remove_nonexist_label_3
+
+ - name: Try to remove single non-existing label from swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - labelnotexist
+ check_mode: true
+ register: output_remove_nonexist_label_4
+
+ - name: assert removing single non-existing label from swarm node
+ assert:
+ that:
+ - 'output_remove_nonexist_label_1 is not changed'
+ - 'output_remove_nonexist_label_2 is not changed'
+ - 'output_remove_nonexist_label_3 is not changed'
+ - 'output_remove_nonexist_label_4 is not changed'
+ - 'output_remove_nonexist_label_2.node.Spec.Labels | length == 5'
+ - '"label1" not in output_remove_nonexist_label_2.node.Spec.Labels'
+ - 'output_remove_nonexist_label_2.node.Spec.Labels.label3 == "value3333"'
+ - 'output_remove_nonexist_label_2.node.Spec.Labels.label5 == "value5"'
+
+####################################################################
+## Remove multiple labels ##########################################
+####################################################################
+
+ - name: Try to remove two existing labels from swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label2
+ - label3
+ check_mode: true
+ register: output_remove_label_1
+
+ - name: Try to remove two existing labels from swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label2
+ - label3
+ register: output_remove_label_2
+
+ - name: Try to remove two existing labels from swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label2
+ - label3
+ register: output_remove_label_3
+
+ - name: Try to remove two existing labels from swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label2
+ - label3
+ check_mode: true
+ register: output_remove_label_4
+
+ - name: assert removing multiple labels from swarm node
+ assert:
+ that:
+ - 'output_remove_label_1 is changed'
+ - 'output_remove_label_2 is changed'
+ - 'output_remove_label_3 is not changed'
+ - 'output_remove_label_4 is not changed'
+ - 'output_remove_label_2.node.Spec.Labels | length == 3'
+ - '"label1" not in output_remove_label_2.node.Spec.Labels'
+ - '"label2" not in output_remove_label_2.node.Spec.Labels'
+ - 'output_remove_label_2.node.Spec.Labels.label5 == "value5"'
+
+####################################################################
+## Remove multiple labels, mix assigned and not assigned ##########
+####################################################################
+
  - name: Try to remove mix of existing and non-existing labels from swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label4
+ - labelisnotthere
+ check_mode: true
+ register: output_remove_mix_labels_1
+
  - name: Try to remove mix of existing and non-existing labels from swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label4
+ - labelisnotthere
+ register: output_remove_mix_labels_2
+
  - name: Try to remove mix of existing and non-existing labels from swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label4
+ - labelisnotthere
+ register: output_remove_mix_labels_3
+
  - name: Try to remove mix of existing and non-existing labels from swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_to_remove:
+ - label4
+ - labelisnotthere
+ check_mode: true
+ register: output_remove_mix_labels_4
+
+ - name: assert removing mix of existing and non-existing labels from swarm node
+ assert:
+ that:
+ - 'output_remove_mix_labels_1 is changed'
+ - 'output_remove_mix_labels_2 is changed'
+ - 'output_remove_mix_labels_3 is not changed'
+ - 'output_remove_mix_labels_4 is not changed'
+ - 'output_remove_mix_labels_2.node.Spec.Labels | length == 2'
+ - '"label1" not in output_remove_mix_labels_2.node.Spec.Labels'
+ - '"label4" not in output_remove_mix_labels_2.node.Spec.Labels'
+ - 'output_remove_mix_labels_2.node.Spec.Labels.label5 == "value5"'
+
+####################################################################
+## Add and remove labels ###########################################
+####################################################################
+
+ - name: Try to add and remove nonoverlapping labels at the same time (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label7: value7
+ label8: value8
+ labels_to_remove:
+ - label5
+ check_mode: true
+ register: output_add_del_labels_1
+
+ - name: Try to add and remove nonoverlapping labels at the same time
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label7: value7
+ label8: value8
+ labels_to_remove:
+ - label5
+ register: output_add_del_labels_2
+
+ - name: Try to add and remove nonoverlapping labels at the same time (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label7: value7
+ label8: value8
+ labels_to_remove:
+ - label5
+ register: output_add_del_labels_3
+
+ - name: Try to add and remove nonoverlapping labels at the same time (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label7: value7
+ label8: value8
+ labels_to_remove:
+ - label5
+ check_mode: true
+ register: output_add_del_labels_4
+
+ - name: assert adding and removing nonoverlapping labels from swarm node
+ assert:
+ that:
+ - 'output_add_del_labels_1 is changed'
+ - 'output_add_del_labels_2 is changed'
+ - 'output_add_del_labels_3 is not changed'
+ - 'output_add_del_labels_4 is not changed'
+ - 'output_add_del_labels_2.node.Spec.Labels | length == 3'
+ - '"label5" not in output_add_del_labels_2.node.Spec.Labels'
+ - 'output_add_del_labels_2.node.Spec.Labels.label8 == "value8"'
+
+####################################################################
+## Add and remove labels with label in both lists ##################
+####################################################################
+
+ - name: Try to add or update and remove overlapping labels at the same time (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label22: value22
+ label6: value6666
+ labels_to_remove:
+ - label6
+ - label7
+ check_mode: true
+ register: output_add_del_overlap_lables_1
+
+ - name: Try to add or update and remove overlapping labels at the same time
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label22: value22
+ label6: value6666
+ labels_to_remove:
+ - label6
+ - label7
+ register: output_add_del_overlap_lables_2
+
+ - name: Try to add or update and remove overlapping labels at the same time (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label22: value22
+ label6: value6666
+ labels_to_remove:
+ - label6
+ - label7
+ register: output_add_del_overlap_lables_3
+
+ - name: Try to add or update and remove overlapping labels at the same time (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label22: value22
+ label6: value6666
+ labels_to_remove:
+ - label6
+ - label7
+ check_mode: true
+ register: output_add_del_overlap_lables_4
+
+ - name: assert adding or updating and removing overlapping labels from swarm node
+ assert:
+ that:
+ - 'output_add_del_overlap_lables_1 is changed'
+ - 'output_add_del_overlap_lables_2 is changed'
+ - 'output_add_del_overlap_lables_3 is not changed'
+ - 'output_add_del_overlap_lables_4 is not changed'
+ - 'output_add_del_overlap_lables_2.node.Spec.Labels | length == 3'
+ - '"label7" not in output_add_del_overlap_lables_2.node.Spec.Labels'
+ - 'output_add_del_overlap_lables_2.node.Spec.Labels.label6 == "value6666"'
+ - 'output_add_del_overlap_lables_2.node.Spec.Labels.label22 == "value22"'
+
+####################################################################
+## Replace labels #############################################
+####################################################################
+
+ - name: Replace labels on swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label11: value11
+ label12: value12
+ labels_state: replace
+ check_mode: true
+ register: output_replace_labels_1
+
+ - name: Replace labels on swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label11: value11
+ label12: value12
+ labels_state: replace
+ register: output_replace_labels_2
+
+ - name: Replace labels on swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label11: value11
+ label12: value12
+ labels_state: replace
+ register: output_replace_labels_3
+
+ - name: Replace labels on swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels:
+ label11: value11
+ label12: value12
+ labels_state: replace
+ check_mode: true
+ register: output_replace_labels_4
+
+ - name: assert replacing labels from swarm node
+ assert:
+ that:
+ - 'output_replace_labels_1 is changed'
+ - 'output_replace_labels_2 is changed'
+ - 'output_replace_labels_3 is not changed'
+ - 'output_replace_labels_4 is not changed'
+ - 'output_replace_labels_2.node.Spec.Labels | length == 2'
+ - '"label6" not in output_replace_labels_2.node.Spec.Labels'
+ - 'output_replace_labels_2.node.Spec.Labels.label12 == "value12"'
+
+####################################################################
+## Remove all labels #############################################
+####################################################################
+
+ - name: Remove all labels from swarm node (check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_state: replace
+ check_mode: true
+ register: output_remove_labels_1
+
+ - name: Remove all labels from swarm node
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_state: replace
+ register: output_remove_labels_2
+
+ - name: Remove all labels from swarm node (idempotent)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_state: replace
+ register: output_remove_labels_3
+
+ - name: Remove all labels from swarm node (idempotent check)
+ docker_node:
+ hostname: "{{ nodeid }}"
+ labels_state: replace
+ check_mode: true
+ register: output_remove_labels_4
+
  - name: assert removing all labels from swarm node
+ assert:
+ that:
+ - 'output_remove_labels_1 is changed'
+ - 'output_remove_labels_2 is changed'
+ - 'output_remove_labels_3 is not changed'
+ - 'output_remove_labels_4 is not changed'
+ - 'output_remove_labels_2.node.Spec.Labels | length == 0'
+
+ always:
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_node_info/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_node_info/aliases
new file mode 100644
index 00000000..9eec55e3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_node_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/2
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_node_info/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_node_info/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_node_info/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/main.yml
new file mode 100644
index 00000000..7d3a1b18
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_node_info.yml
+ when: docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_node_info tests!"
+ when: not(docker_py_version is version('2.4.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/test_node_info.yml b/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/test_node_info.yml
new file mode 100644
index 00000000..9a89a2a1
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_node_info/tasks/test_node_info.yml
@@ -0,0 +1,92 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Try to get docker_node_info when docker is not running in swarm mode
+ docker_node_info:
+ ignore_errors: true
+ register: output
+
+ - name: assert failure when called when swarm is not in use or not run on manager node
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "Error running docker swarm module: must run on swarm manager node"'
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ register: output
+
+ - name: assert changed when create a new swarm cluster
+ assert:
+ that:
+ - 'output is changed'
+ - 'output.actions[0] | regex_search("New Swarm cluster created: ")'
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+
+ - name: Try to get docker_node_info when docker is running in swarm mode and as manager
+ docker_node_info:
+ register: output
+
+ - name: assert reading docker swarm node facts
+ assert:
+ that:
+ - 'output.nodes | length > 0'
+ - 'output.nodes[0].ID is string'
+
+ - name: Try to get docker_node_info using the self parameter
+ docker_node_info:
+ self: true
+ register: output
+
+ - name: assert reading swarm facts with list of nodes option
+ assert:
+ that:
+ - 'output.nodes | length == 1'
+ - 'output.nodes[0].ID is string'
+
+ - name: Get local docker node name
+ set_fact:
+ localnodename: "{{ output.nodes[0].Description.Hostname }}"
+
+
+ - name: Try to get docker_node_info using the local node name as parameter
+ docker_node_info:
+ name: "{{ localnodename }}"
+ register: output
+
+ - name: assert reading swarm facts and using node filter (local node name)
+ assert:
+ that:
+ - 'output.nodes | length == 1'
+ - 'output.nodes[0].ID is string'
+
+ - name: Create random name
+ set_fact:
+ randomnodename: "{{ 'node-%0x' % ((2**32) | random) }}"
+
+ - name: Try to get docker_node_info using random node name as parameter
+ docker_node_info:
+ name: "{{ randomnodename }}"
+ register: output
+
+ - name: assert reading swarm facts and using node filter (random node name)
+ assert:
+ that:
+ - 'output.nodes | length == 0'
+
+ always:
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_plugin/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_plugin/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/main.yml
new file mode 100644
index 00000000..14261433
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Create random name prefix
+ set_fact:
+ name_prefix: "vieux/sshfs"
+ plugin_names: []
+
+- debug:
+ msg: "Using name prefix {{ name_prefix }}"
+
+- name: Check whether /dev/fuse exists
+ stat:
+ path: /dev/fuse
+ register: dev_fuse_stat
+
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure plugin is removed"
+ docker_plugin:
+ plugin_name: "{{ item }}"
+ state: absent
+ with_items: "{{ plugin_names }}"
+
+ when: docker_api_version is version('1.25', '>=') and dev_fuse_stat.stat.exists
+
+- fail: msg="Too old docker / docker-py version to run docker_plugin tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/run-test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/run-test.yml
new file mode 100644
index 00000000..65853ddd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/run-test.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/tests/basic.yml b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/tests/basic.yml
new file mode 100644
index 00000000..8ea6058c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/tests/basic.yml
@@ -0,0 +1,192 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering plugin name
+ set_fact:
+ plugin_name: "{{ name_prefix }}"
+
+- name: Registering container name
+ set_fact:
+ plugin_names: "{{ plugin_names + [plugin_name] }}"
+
+############ basic test ############
+####################################
+
+- name: Create a plugin (check mode)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: present
+ register: create_1_check
+ check_mode: true
+
+- name: Create a plugin
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: present
+ register: create_1
+
+- name: Create a plugin (Idempotent, check mode)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: present
+ register: create_2_check
+ check_mode: true
+
+- name: Create a plugin (Idempotent)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: present
+ register: create_2
+
+- name: Enable a plugin (check mode)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: enable
+ register: create_3_check
+ check_mode: true
+
+- name: Enable a plugin
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: enable
+ register: create_3
+
+- name: Enable a plugin (Idempotent, check mode)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: enable
+ register: create_4_check
+ check_mode: true
+
+- name: Enable a plugin (Idempotent)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: enable
+ register: create_4
+
+- name: Disable a plugin (check mode)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: disable
+ register: absent_1_check
+ check_mode: true
+
+- name: Disable a plugin
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: disable
+ register: absent_1
+
+- name: Disable a plugin (Idempotent, check mode)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: disable
+ register: absent_2_check
+ check_mode: true
+
+- name: Disable a plugin (Idempotent)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: disable
+ register: absent_2
+
+- name: Remove a plugin (check mode)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: absent
+ register: absent_3_check
+ check_mode: true
+
+- name: Remove a plugin
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: absent
+ register: absent_3
+
+- name: Remove a plugin (Idempotent, check mode)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: absent
+ register: absent_4_check
+ check_mode: true
+
+- name: Remove a plugin (Idempotent)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: absent
+ register: absent_4
+
+- name: Cleanup
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: absent
+ force_remove: true
+
+- assert:
+ that:
+ - create_1_check is changed
+ - create_1 is changed
+ - create_2_check is not changed
+ - create_2 is not changed
+ - create_3_check is changed
+ - create_3 is changed
+ - create_4_check is not changed
+ - create_4 is not changed
+ - absent_1_check is changed
+ - absent_1 is changed
+ - absent_2_check is not changed
+ - absent_2 is not changed
+ - absent_3_check is changed
+ - absent_3 is changed
+ - absent_4_check is not changed
+ - absent_4 is not changed
+
+############ Plugin_Options ############
+########################################
+
+- name: Install a plugin with options
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ plugin_options:
+ DEBUG: '1'
+ state: present
+ register: create_1
+
+- name: Install a plugin with options (idempotent)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ plugin_options:
+ DEBUG: '1'
+ state: present
+ register: create_2
+
+- name: Install a plugin with different options
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ plugin_options:
+ DEBUG: '0'
+ state: present
+ register: update_1
+
+- name: Install a plugin with different options (idempotent)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ plugin_options:
+ DEBUG: '0'
+ state: present
+ register: update_2
+
+- name: Cleanup
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ state: absent
+ force_remove: true
+
+- assert:
+ that:
+ - create_1 is changed
+ - create_2 is not changed
+ - update_1 is changed
+ - update_2 is not changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/tests/basic_with_alias.yml b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/tests/basic_with_alias.yml
new file mode 100644
index 00000000..c26b188a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_plugin/tasks/tests/basic_with_alias.yml
@@ -0,0 +1,83 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Register plugin name and alias
+ set_fact:
+ plugin_name: "{{ name_prefix }}"
+ alias: "test"
+
+- name: Create a plugin with an alias
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ alias: "{{ alias }}"
+ state: present
+ register: create_1
+
+- name: Create a plugin with an alias (Idempotent)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ alias: "{{ alias }}"
+ state: present
+ register: create_2
+
+- name: Enable a plugin with an alias
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ alias: "{{ alias }}"
+ state: enable
+ register: create_3
+
+- name: Enable a plugin with an alias (Idempotent)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ alias: "{{ alias }}"
+ state: enable
+ register: create_4
+
+- name: Disable a plugin with an alias
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ alias: "{{ alias }}"
+ state: disable
+ register: absent_1
+
+- name: Disable a plugin with an alias (Idempotent)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ alias: "{{ alias }}"
+ state: disable
+ register: absent_2
+
+- name: Remove a plugin with an alias
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ alias: "{{ alias }}"
+ state: absent
+ register: absent_3
+
+- name: Remove a plugin with an alias (Idempotent)
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ alias: "{{ alias }}"
+ state: absent
+ register: absent_4
+
+- assert:
+ that:
+ - create_1 is changed
+ - create_2 is not changed
+ - create_3 is changed
+ - create_4 is not changed
+ - absent_1 is changed
+ - absent_2 is not changed
+ - absent_3 is changed
+ - absent_4 is not changed
+
+- name: Cleanup plugin with an alias
+ docker_plugin:
+ plugin_name: "{{ plugin_name }}"
+ alias: "{{ alias }}"
+ state: absent
+ force_remove: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_prune/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_prune/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_prune/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_prune/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_prune/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_prune/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_prune/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_prune/tasks/main.yml
new file mode 100644
index 00000000..b2160ef0
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_prune/tasks/main.yml
@@ -0,0 +1,153 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create random names
+ set_fact:
+ cname: "{{ 'ansible-container-%0x' % ((2**32) | random) }}"
+ nname: "{{ 'ansible-network-%0x' % ((2**32) | random) }}"
+ vname: "{{ 'ansible-volume-%0x' % ((2**32) | random) }}"
+
+- block:
+ # Create objects to be pruned
+ - name: Create container (without volume)
+ docker_container:
+ name: "{{ cname }}"
+ image: "{{ docker_test_image_hello_world }}"
+ state: present
+ register: container
+ - name: Create network
+ docker_network:
+ name: "{{ nname }}"
+ state: present
+ register: network
+ - name: Create named volume
+ docker_volume:
+ name: "{{ vname }}"
+ state: present
+ register: volume
+ - name: Create anonymous volume
+ command: docker volume create
+ register: volume_anon
+
+ - name: List volumes
+ command: docker volume list
+
+ # Prune objects
+ - name: Prune everything
+ docker_prune:
+ containers: true
+ images: true
+ networks: true
+ volumes: true
+ builder_cache: true
+ register: result
+
+ # Analyze result
+ - name: Show results
+ debug:
+ var: result
+ - name: General checks
+ assert:
+ that:
+ - result is changed
+ # containers
+ - container.container.Id in result.containers
+ - "'containers_space_reclaimed' in result"
+ # images
+ - "'images_space_reclaimed' in result"
+ # networks
+ - network.network.Name in result.networks
+ # volumes
+ - volume_anon.stdout in result.volumes
+ - "'volumes_space_reclaimed' in result"
+ # builder_cache
+ - "'builder_cache_space_reclaimed' in result"
+ - name: API-version specific volumes check (API version before 1.42)
+ assert:
+ that:
+ # For API version 1.41 and before, pruning always considers all volumes
+ - volume.volume.Name in result.volumes
+ when: docker_api_version is version('1.42', '<')
+ - name: API-version specific volumes check (API version 1.42+)
+ assert:
+ that:
+ # For API version 1.42 and later, pruning considers only anonymous volumes,
+ # so our named volume is not removed
+ - volume.volume.Name not in result.volumes
+ when: docker_api_version is version('1.42', '>=')
+
+ # Prune objects again
+ - name: Prune everything again (should have no change)
+ docker_prune:
+ containers: true
+ images: true
+ networks: true
+ volumes: true
+ builder_cache: true
+ register: result
+
+ # Analyze result
+ - name: Show results
+ debug:
+ var: result
+ - name: General checks
+ assert:
+ that:
+ - result is not changed
+ # containers
+ - result.containers == []
+ - result.containers_space_reclaimed == 0
+ # images
+ - result.images == []
+ - result.images_space_reclaimed == 0
+ # networks
+ - result.networks == []
+ # volumes
+ - result.volumes == []
+ # builder_cache
+ - result.builder_cache_space_reclaimed == 0
+
+ # Test with filters
+ - name: Prune with filters
+ docker_prune:
+ images: true
+ images_filters:
+ dangling: true
+ register: result
+
+ - name: Show results
+ debug:
+ var: result
+
+ - name: Prune volumes with all filter (API version 1.42+)
+ when: docker_api_version is version('1.42', '>=')
+ block:
+ - name: Prune with filters
+ docker_prune:
+ volumes: true
+ volumes_filters:
+ all: true
+ register: result
+
+ - name: Show results
+ debug:
+ var: result
+
+ - name: Check results
+ assert:
+ that:
+ - result is changed
+ - volume.volume.Name in result.volumes
+ - "'volumes_space_reclaimed' in result"
+
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_prune tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_secret/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_secret/aliases
new file mode 100644
index 00000000..fc581d54
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_secret/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/3
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_secret/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_secret/meta/main.yml
new file mode 100644
index 00000000..2650229d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_secret/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/main.yml
new file mode 100644
index 00000000..291f6aa9
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_secrets.yml
+ when: docker_py_version is version('2.1.0', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_secrets tests!"
+ when: not(docker_py_version is version('2.1.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/test_secrets.yml b/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/test_secrets.yml
new file mode 100644
index 00000000..2615b640
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_secret/tasks/test_secrets.yml
@@ -0,0 +1,222 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
+
+ - name: Parameter name should be required
+ docker_secret:
+ state: present
+ ignore_errors: true
+ register: output
+
+ - name: assert failure when called with no name
+ assert:
+ that:
+ - 'output.failed'
+ - 'output.msg == "missing required arguments: name"'
+
+ - name: Test parameters
+ docker_secret:
+ name: foo
+ state: present
+ ignore_errors: true
+ register: output
+
+ - name: assert failure when called with no data
+ assert:
+ that:
+ - 'output.failed'
+ - 'output.msg == "state is present but any of the following are missing: data, data_src"'
+
+ - name: Create secret
+ docker_secret:
+ name: db_password
+ data: opensesame!
+ state: present
+ register: output
+
+ - name: Create variable secret_id
+ set_fact:
+ secret_id: "{{ output.secret_id }}"
+
+ - name: Inspect secret
+ command: "docker secret inspect {{ secret_id }}"
+ register: inspect
+ ignore_errors: true
+
+ - debug: var=inspect
+
+ - name: assert secret creation succeeded
+ assert:
+ that:
+ - "'db_password' in inspect.stdout"
+ - "'ansible_key' in inspect.stdout"
+ when: inspect is not failed
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in inspect.stderr"
+ when: inspect is failed
+
+ - name: Create secret again
+ docker_secret:
+ name: db_password
+ data: opensesame!
+ state: present
+ register: output
+
+ - name: assert create secret is idempotent
+ assert:
+ that:
+ - not output.changed
+
+ - name: Write secret into file
+ copy:
+ dest: "{{ remote_tmp_dir }}/data"
+ content: |-
+ opensesame!
+
+ - name: Create secret again (from file)
+ docker_secret:
+ name: db_password
+ data_src: "{{ remote_tmp_dir }}/data"
+ state: present
+ register: output
+
+ - name: assert create secret is idempotent
+ assert:
+ that:
+ - not output.changed
+
+ - name: Create secret again (base64)
+ docker_secret:
+ name: db_password
+ data: b3BlbnNlc2FtZSE=
+ data_is_b64: true
+ state: present
+ register: output
+
+ - name: assert create secret (base64) is idempotent
+ assert:
+ that:
+ - not output.changed
+
+ - name: Update secret
+ docker_secret:
+ name: db_password
+ data: newpassword!
+ state: present
+ register: output
+
+ - name: assert secret was updated
+ assert:
+ that:
+ - output.changed
+ - output.secret_id != secret_id
+
+ - name: Remove secret
+ docker_secret:
+ name: db_password
+ state: absent
+
+ - name: Check that secret is removed
+ command: "docker secret inspect {{ secret_id }}"
+ register: output
+ ignore_errors: true
+
+ - name: assert secret was removed
+ assert:
+ that:
+ - output.failed
+
+# Rolling update
+
+ - name: Create rolling secret
+ docker_secret:
+ name: rolling_password
+ data: opensesame!
+ rolling_versions: true
+ state: present
+ register: original_output
+
+ - name: Create variable secret_id
+ set_fact:
+ secret_id: "{{ original_output.secret_id }}"
+
+ - name: Inspect secret
+ command: "docker secret inspect {{ secret_id }}"
+ register: inspect
+ ignore_errors: true
+
+ - debug: var=inspect
+
+ - name: assert secret creation succeeded
+ assert:
+ that:
+ - "'rolling_password' in inspect.stdout"
+ - "'ansible_key' in inspect.stdout"
+ - "'ansible_version' in inspect.stdout"
+ - original_output.secret_name == 'rolling_password_v1'
+ when: inspect is not failed
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in inspect.stderr"
+ when: inspect is failed
+
+ - name: Create secret again
+ docker_secret:
+ name: rolling_password
+ data: newpassword!
+ rolling_versions: true
+ state: present
+ register: new_output
+
+ - name: assert that new version is created
+ assert:
+ that:
+ - new_output.changed
+ - new_output.secret_id != original_output.secret_id
+ - new_output.secret_name != original_output.secret_name
+ - new_output.secret_name == 'rolling_password_v2'
+
+ - name: Remove rolling secrets
+ docker_secret:
+ name: rolling_password
+ rolling_versions: true
+ state: absent
+
+ - name: Check that secret is removed
+ command: "docker secret inspect {{ original_output.secret_id }}"
+ register: output
+ ignore_errors: true
+
+ - name: assert secret was removed
+ assert:
+ that:
+ - output.failed
+
+ - name: Check that secret is removed
+ command: "docker secret inspect {{ new_output.secret_id }}"
+ register: output
+ ignore_errors: true
+
+ - name: assert secret was removed
+ assert:
+ that:
+ - output.failed
+
+ always:
+ - name: Remove Swarm cluster
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_stack/aliases
new file mode 100644
index 00000000..9eec55e3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/2
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack/meta/main.yml
new file mode 100644
index 00000000..2650229d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/main.yml
new file mode 100644
index 00000000..390e36ef
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_stack.yml
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_stack tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/test_stack.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/test_stack.yml
new file mode 100644
index 00000000..9f2d170e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack/tasks/test_stack.yml
@@ -0,0 +1,117 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
+
+ - name: install docker_stack python requirements
+ pip:
+ name: jsondiff,pyyaml
+
+ - name: Create a stack without name
+ register: output
+ docker_stack:
+ state: present
+ ignore_errors: true
+
+ - name: assert failure when name not set
+ assert:
+ that:
+ - output is failed
+ - 'output.msg == "missing required arguments: name"'
+
+ - name: Create a stack without compose
+ register: output
+ docker_stack:
+ name: test_stack
+ ignore_errors: true
+
+ - name: assert failure when compose not set
+ assert:
+ that:
+ - output is failed
+ - 'output.msg == "compose parameter must be a list containing at least one element"'
+
+ - name: Ensure stack is absent
+ register: output
+ docker_stack:
+ state: absent
+ name: test_stack
+ absent_retries: 30
+
+ - name: Template compose files
+ template:
+ src: "{{item}}"
+ dest: "{{remote_tmp_dir}}/"
+ with_items:
+ - stack_compose_base.yml
+ - stack_compose_overrides.yml
+
+ - name: Create stack with compose file
+ register: output
+ docker_stack:
+ state: present
+ name: test_stack
+ compose:
+ - "{{remote_tmp_dir}}/stack_compose_base.yml"
+
+ - name: assert test_stack changed on stack creation with compose file
+ assert:
+ that:
+ - output is changed
+
+ # FIXME: updating the stack prevents leaving the swarm on Shippable
+ #- name: Update stack with YAML
+ # register: output
+ # docker_stack:
+ # state: present
+ # name: test_stack
+ # compose:
+ # - "{{stack_compose_base}}"
+ # - "{{stack_compose_overrides}}"
+ #
+ #- name: assert test_stack correctly changed on update with yaml
+ # assert:
+ # that:
+ # - output is changed
+ # - output.stack_spec_diff == stack_update_expected_diff
+
+ - name: Delete stack
+ register: output
+ docker_stack:
+ state: absent
+ name: test_stack
+ absent_retries: 30
+
+ - name: assert delete of existing stack returns changed
+ assert:
+ that:
+ - output is changed
+
+ - name: Delete stack again
+ register: output
+ docker_stack:
+ state: absent
+ name: test_stack
+ absent_retries: 30
+
+ - name: assert state=absent idempotency
+ assert:
+ that:
+ - output is not changed
+
+ always:
+ - name: Remove a Swarm cluster
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_base.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_base.yml
new file mode 100644
index 00000000..03603327
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_base.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: '3'
+services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml
new file mode 100644
index 00000000..8743f1e9
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack/templates/stack_compose_overrides.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: '3'
+services:
+ busybox:
+ environment:
+ envvar: value
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack/vars/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack/vars/main.yml
new file mode 100644
index 00000000..a668012f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack/vars/main.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+stack_compose_base:
+ version: '3'
+ services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
+
+stack_compose_overrides:
+ version: '3'
+ services:
+ busybox:
+ environment:
+ envvar: value
+
+stack_update_expected_diff: '{"test_stack_busybox": {"TaskTemplate": {"ContainerSpec": {"Env": ["envvar=value"]}}}}'
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/aliases
new file mode 100644
index 00000000..9eec55e3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/2
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/meta/main.yml
new file mode 100644
index 00000000..2650229d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/main.yml
new file mode 100644
index 00000000..c6cbb617
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_stack_info.yml
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_stack_info tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml
new file mode 100644
index 00000000..58d6d5bb
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/tasks/test_stack_info.yml
@@ -0,0 +1,78 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Get docker_stack_info when docker is not running in swarm mode
+ docker_stack_info:
+ ignore_errors: true
+ register: output
+
+ - name: Assert failure when called when swarm is not running
+ assert:
+ that:
+ - 'output is failed'
+ - '"Error running docker stack" in output.msg'
+
+ - name: Create a swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
+
+ - name: Get docker_stack_info when docker is running and not stack available
+ docker_stack_info:
+ register: output
+
+ - name: Assert stack facts
+ assert:
+ that:
+ - 'output.results | type_debug == "list"'
+ - 'output.results | length == 0'
+
+ - name: Template compose files
+ template:
+ src: "{{ item }}"
+ dest: "{{ remote_tmp_dir }}/"
+ with_items:
+ - stack_compose_base.yml
+ - stack_compose_overrides.yml
+
+ - name: Install docker_stack python requirements
+ pip:
+ name: jsondiff,pyyaml
+
+ - name: Create stack with compose file
+ register: output
+ docker_stack:
+ state: present
+ name: test_stack
+ compose:
+ - "{{ remote_tmp_dir }}/stack_compose_base.yml"
+
+ - name: Assert test_stack changed on stack creation with compose file
+ assert:
+ that:
+ - output is changed
+
+ - name: Get docker_stack_info when docker is running
+ docker_stack_info:
+ register: output
+
+ - name: assert stack facts
+ assert:
+ that:
+ - 'output.results | type_debug == "list"'
+ - 'output.results[0].Name == "test_stack"'
+ - 'output.results[0].Services == "1"'
+
+ always:
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml
new file mode 100644
index 00000000..03603327
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_base.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: '3'
+services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml
new file mode 100644
index 00000000..8743f1e9
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/templates/stack_compose_overrides.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: '3'
+services:
+ busybox:
+ environment:
+ envvar: value
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/vars/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/vars/main.yml
new file mode 100644
index 00000000..a668012f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_info/vars/main.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+stack_compose_base:
+ version: '3'
+ services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
+
+stack_compose_overrides:
+ version: '3'
+ services:
+ busybox:
+ environment:
+ envvar: value
+
+stack_update_expected_diff: '{"test_stack_busybox": {"TaskTemplate": {"ContainerSpec": {"Env": ["envvar=value"]}}}}'
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/aliases
new file mode 100644
index 00000000..9eec55e3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/2
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/meta/main.yml
new file mode 100644
index 00000000..2650229d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/main.yml
new file mode 100644
index 00000000..b52fa9c7
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_stack_task_info.yml
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_stack_task_info tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml
new file mode 100644
index 00000000..30b5ca9e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/tasks/test_stack_task_info.yml
@@ -0,0 +1,88 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Get docker_stack_info when docker is not running in swarm mode
+ docker_stack_info:
+ ignore_errors: true
+ register: output
+
+ - name: Assert failure when called when swarm is not running
+ assert:
+ that:
+ - 'output is failed'
+ - '"Error running docker stack" in output.msg'
+
+ - name: Create a swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
+
+ - name: Get docker_stack_info when docker is running and not stack available
+ docker_stack_info:
+ register: output
+
+ - name: Assert stack facts
+ assert:
+ that:
+ - 'output.results | type_debug == "list"'
+ - 'output.results | length == 0'
+
+ - name: Template compose files
+ template:
+ src: "{{ item }}"
+ dest: "{{ remote_tmp_dir }}/"
+ with_items:
+ - stack_compose_base.yml
+ - stack_compose_overrides.yml
+
+ - name: Install docker_stack python requirements
+ pip:
+ name: jsondiff,pyyaml
+
+ - name: Create stack with compose file
+ register: output
+ docker_stack:
+ state: present
+ name: test_stack
+ compose:
+ - "{{ remote_tmp_dir }}/stack_compose_base.yml"
+
+ - name: Assert test_stack changed on stack creation with compose file
+ assert:
+ that:
+ - output is changed
+
+ - name: Wait a bit to make sure stack is running
+ pause:
+ seconds: 5
+
+ - name: Get docker_stack_info when docker is running
+ docker_stack_info:
+ register: output
+
+ - name: Get docker_stack_task_info first element
+ docker_stack_task_info:
+ name: "{{ output.results[0].Name }}"
+ register: output
+
+ - name: assert stack facts
+ assert:
+ that:
+ - 'output.results | type_debug == "list"'
+ - 'output.results[0].DesiredState == "Running"'
+ - 'output.results[0].Image == docker_test_image_busybox'
+ - 'output.results[0].Name == "test_stack_busybox.1"'
+
+ always:
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml
new file mode 100644
index 00000000..03603327
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_base.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: '3'
+services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml
new file mode 100644
index 00000000..8743f1e9
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/templates/stack_compose_overrides.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+version: '3'
+services:
+ busybox:
+ environment:
+ envvar: value
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/vars/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/vars/main.yml
new file mode 100644
index 00000000..a668012f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_stack_task_info/vars/main.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+stack_compose_base:
+ version: '3'
+ services:
+ busybox:
+ image: "{{ docker_test_image_busybox }}"
+ command: sleep 3600
+
+stack_compose_overrides:
+ version: '3'
+ services:
+ busybox:
+ environment:
+ envvar: value
+
+stack_update_expected_diff: '{"test_stack_busybox": {"TaskTemplate": {"ContainerSpec": {"Env": ["envvar=value"]}}}}'
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/aliases
new file mode 100644
index 00000000..19c65551
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/1
+destructive
+needs/root
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/meta/main.yml
new file mode 100644
index 00000000..e7ff3d68
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/meta/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_openssl
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/cleanup.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/cleanup.yml
new file mode 100644
index 00000000..944e795c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/cleanup.yml
@@ -0,0 +1,38 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: CLEANUP | Leave Docker Swarm
+ docker_swarm:
+ state: absent
+ force: true
+ ignore_errors: true
+ register: leave_swarm
+
+- name: CLEANUP | Kill Docker and cleanup
+ when: leave_swarm is failed
+ block:
+ - name: CLEANUP | Kill docker daemon
+ command: systemctl kill -s 9 docker
+ become: true
+
+ - name: CLEANUP | Clear out /var/lib/docker
+ shell: rm -rf /var/lib/docker/*
+
+ - name: CLEANUP | Start docker daemon
+ service:
+ name: docker
+ state: started
+ become: true
+
+ - name: CLEANUP | Wait for docker daemon to be fully started
+ command: docker ps
+ register: result
+ until: result is success
+ retries: 10
+
+ - name: CLEANUP | Leave Docker Swarm
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/main.yml
new file mode 100644
index 00000000..16f68153
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Run Docker Swarm tests
+ when:
+ - docker_py_version is version('1.10.0', '>=')
+ - docker_api_version is version('1.25', '>=')
+
+ block:
+ - include_tasks: "{{ item }}"
+ with_fileglob:
+ - 'tests/*.yml'
+
+ always:
+ - import_tasks: cleanup.yml
+
+- fail:
+ msg: "Too old docker / docker-py version to run docker_swarm tests!"
+ when:
+ - not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.25', '>='))
+ - (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/run-test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/run-test.yml
new file mode 100644
index 00000000..f55df21f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/run-test.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/basic.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/basic.yml
new file mode 100644
index 00000000..79d524e5
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/basic.yml
@@ -0,0 +1,163 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- debug:
+ msg: Running tests/basic.yml
+
+####################################################################
+## Errors ##########################################################
+####################################################################
+- name: Test parameters with state=join
+ docker_swarm:
+ state: join
+ ignore_errors: true
+ register: output
+
+- name: assert failure when called with state=join and no remote_addrs,join_token
+ assert:
+ that:
+ - 'output.failed'
+ - 'output.msg == "state is join but all of the following are missing: remote_addrs, join_token"'
+
+- name: Test parameters with state=remove
+ docker_swarm:
+ state: remove
+ ignore_errors: true
+ register: output
+
+- name: assert failure when called with state=remove and no node_id
+ assert:
+ that:
+ - 'output.failed'
+ - 'output.msg == "state is remove but all of the following are missing: node_id"'
+
+####################################################################
+## Creation ########################################################
+####################################################################
+
+- name: Create a Swarm cluster (check mode)
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ diff: true
+ register: output_2
+
+- name: Create a Swarm cluster (idempotent)
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ diff: true
+ register: output_3
+
+- name: Create a Swarm cluster (idempotent, check mode)
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: Create a Swarm cluster (force re-create)
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ force: true
+ diff: true
+ register: output_5
+
+- name: Create a Swarm cluster (force re-create, check mode)
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ force: true
+ check_mode: true
+ diff: true
+ register: output_6
+
+- name: assert changed when create a new swarm cluster
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] | regex_search("New Swarm cluster created: ")'
+ - 'output_2.swarm_facts.JoinTokens.Manager'
+ - 'output_2.swarm_facts.JoinTokens.Worker'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## Removal #########################################################
+####################################################################
+
+- name: Remove a Swarm cluster (check mode)
+ docker_swarm:
+ state: absent
+ force: true
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: Remove a Swarm cluster
+ docker_swarm:
+ state: absent
+ force: true
+ diff: true
+ register: output_2
+
+- name: Remove a Swarm cluster (idempotent)
+ docker_swarm:
+ state: absent
+ force: true
+ diff: true
+ register: output_3
+
+- name: Remove a Swarm cluster (idempotent, check mode)
+ docker_swarm:
+ state: absent
+ force: true
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: assert changed when remove a swarm cluster
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Node has left the swarm cluster"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+
+- include_tasks: cleanup.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml
new file mode 100644
index 00000000..86661ecb
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options-ca.yml
@@ -0,0 +1,133 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- debug:
+ msg: Running tests/options-ca.yml
+- name: options-ca
+ when: cryptography_version.stdout is version('1.6', '>=')
+ block:
+ - name: Generate privatekey
+ loop:
+ - key1
+ - key2
+ loop_control:
+ loop_var: key
+ community.crypto.openssl_privatekey:
+ path: '{{ remote_tmp_dir }}/ansible_{{ key }}.key'
+ size: 2048
+ mode: '0666'
+ - name: Generate CSR
+ loop:
+ - key1
+ - key2
+ loop_control:
+ loop_var: key
+ community.crypto.openssl_csr:
+ path: '{{ remote_tmp_dir }}/ansible_{{ key }}.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/ansible_{{ key }}.key'
+ basic_constraints:
+ - CA:TRUE
+ key_usage:
+ - keyCertSign
+ - name: Generate self-signed certificate
+ loop:
+ - key1
+ - key2
+ loop_control:
+ loop_var: key
+ community.crypto.x509_certificate:
+ path: '{{ remote_tmp_dir }}/ansible_{{ key }}.pem'
+ privatekey_path: '{{ remote_tmp_dir }}/ansible_{{ key }}.key'
+ csr_path: '{{ remote_tmp_dir }}/ansible_{{ key }}.csr'
+ provider: selfsigned
+ - name: Load certificates
+ slurp:
+ src: '{{ remote_tmp_dir }}/{{ item }}'
+ loop:
+ - ansible_key1.pem
+ - ansible_key2.pem
+ register: ansible_certificates
+ - name: Load certificate keys
+ slurp:
+ src: '{{ remote_tmp_dir }}/{{ item }}'
+ loop:
+ - ansible_key1.key
+ - ansible_key2.key
+ register: ansible_keys
+ - name: signing_ca_cert and signing_ca_key (check mode)
+ docker_swarm:
+ advertise_addr: '{{ansible_default_ipv4.address | default("127.0.0.1")}}'
+ state: present
+ signing_ca_cert: '{{ ansible_certificates.results[0].content | b64decode }}'
+ signing_ca_key: '{{ ansible_keys.results[0].content | b64decode }}'
+ timeout: 120
+ check_mode: true
+ diff: true
+ register: output_1
+ ignore_errors: true
+ - name: signing_ca_cert and signing_ca_key
+ docker_swarm:
+ advertise_addr: '{{ansible_default_ipv4.address | default("127.0.0.1")}}'
+ state: present
+ signing_ca_cert: '{{ ansible_certificates.results[0].content | b64decode }}'
+ signing_ca_key: '{{ ansible_keys.results[0].content | b64decode }}'
+ timeout: 120
+ diff: true
+ register: output_2
+ ignore_errors: true
+ - name: Private key
+ debug: msg="{{ ansible_keys.results[0].content | b64decode }}"
+ - name: Cert
+ debug: msg="{{ ansible_certificates.results[0].content | b64decode }}"
+ - docker_swarm_info: null
+ register: output
+ ignore_errors: true
+ - debug: var=output
+ - name: signing_ca_cert and signing_ca_key (change, check mode)
+ docker_swarm:
+ state: present
+ signing_ca_cert: '{{ ansible_certificates.results[1].content | b64decode }}'
+ signing_ca_key: '{{ ansible_keys.results[1].content | b64decode }}'
+ timeout: 120
+ check_mode: true
+ diff: true
+ register: output_5
+ ignore_errors: true
+ - name: signing_ca_cert and signing_ca_key (change)
+ docker_swarm:
+ state: present
+ signing_ca_cert: '{{ ansible_certificates.results[1].content | b64decode }}'
+ signing_ca_key: '{{ ansible_keys.results[1].content | b64decode }}'
+ timeout: 120
+ diff: true
+ register: output_6
+ ignore_errors: true
+ - name: assert signing_ca_cert and signing_ca_key
+ assert:
+ that:
+ - output_1 is changed
+ - 'output_1.actions[0] | regex_search("New Swarm cluster created: ")'
+ - output_1.diff.before is defined
+ - output_1.diff.after is defined
+ - output_2 is changed
+ - 'output_2.actions[0] | regex_search("New Swarm cluster created: ")'
+ - output_2.diff.before is defined
+ - output_2.diff.after is defined
+ - output_5 is changed
+ - output_5.actions[0] == "Swarm cluster updated"
+ - output_5.diff.before is defined
+ - output_5.diff.after is defined
+ - output_6 is changed
+ - output_6.actions[0] == "Swarm cluster updated"
+ - output_6.diff.before is defined
+ - output_6.diff.after is defined
+ when: docker_py_version is version('2.6.0', '>=')
+ - assert:
+ that:
+ - output_1 is failed
+ - ('version is ' ~ docker_py_version ~ ' ') in output_1.msg
+ - '"Minimum version required is 2.6.0 " in output_1.msg'
+ when: docker_py_version is version('2.6.0', '<')
+ - include_tasks: cleanup.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options.yml
new file mode 100644
index 00000000..f88aa3f4
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/options.yml
@@ -0,0 +1,1163 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- debug:
+ msg: Running tests/options.yml
+
+- name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+    advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
+ name: default
+ diff: true
+
+####################################################################
+## autolock_managers ###############################################
+####################################################################
+
+- name: autolock_managers (check mode)
+ docker_swarm:
+ state: present
+ autolock_managers: true
+ check_mode: true
+ diff: true
+ register: output_1
+ ignore_errors: true
+
+- name: autolock_managers
+ docker_swarm:
+ state: present
+ autolock_managers: true
+ diff: true
+ register: output_2
+ ignore_errors: true
+
+- name: autolock_managers (idempotent)
+ docker_swarm:
+ state: present
+ autolock_managers: true
+ diff: true
+ register: output_3
+ ignore_errors: true
+
+- name: autolock_managers (idempotent, check mode)
+ docker_swarm:
+ state: present
+ autolock_managers: true
+ check_mode: true
+ diff: true
+ register: output_4
+ ignore_errors: true
+
+- name: autolock_managers (change, check mode)
+ docker_swarm:
+ state: present
+ autolock_managers: false
+ check_mode: true
+ diff: true
+ register: output_5
+ ignore_errors: true
+
+- name: autolock_managers (change)
+ docker_swarm:
+ state: present
+ autolock_managers: false
+ diff: true
+ register: output_6
+ ignore_errors: true
+
+- name: autolock_managers (force new swarm)
+ docker_swarm:
+ state: present
+ force: true
+    advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
+ autolock_managers: true
+ diff: true
+ register: output_7
+ ignore_errors: true
+
+- name: assert autolock_managers changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+ when: docker_py_version is version('2.6.0', '>=')
+
+- name: assert UnlockKey in swarm_facts
+ assert:
+ that:
+ - 'output_2.swarm_facts.UnlockKey'
+ - 'output_3.swarm_facts.UnlockKey is none'
+ - 'output_6.swarm_facts.UnlockKey is none'
+ - 'output_7.swarm_facts.UnlockKey'
+ when: docker_py_version is version('2.7.0', '>=')
+
+- assert:
+ that:
+ - output_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output_1.msg"
+ - "'Minimum version required is 2.6.0 ' in output_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## ca_force_rotate #################################################
+####################################################################
+
+- name: ca_force_rotate (check mode)
+ docker_swarm:
+ state: present
+ ca_force_rotate: 1
+ check_mode: true
+ diff: true
+ register: output_1
+ ignore_errors: true
+
+- name: ca_force_rotate
+ docker_swarm:
+ state: present
+ ca_force_rotate: 1
+ diff: true
+ register: output_2
+ ignore_errors: true
+
+- name: ca_force_rotate (idempotent)
+ docker_swarm:
+ state: present
+ ca_force_rotate: 1
+ diff: true
+ register: output_3
+ ignore_errors: true
+
+- name: ca_force_rotate (idempotent, check mode)
+ docker_swarm:
+ state: present
+ ca_force_rotate: 1
+ check_mode: true
+ diff: true
+ register: output_4
+ ignore_errors: true
+
+- name: ca_force_rotate (change, check mode)
+ docker_swarm:
+ state: present
+ ca_force_rotate: 0
+ check_mode: true
+ diff: true
+ register: output_5
+ ignore_errors: true
+
+- name: ca_force_rotate (change)
+ docker_swarm:
+ state: present
+ ca_force_rotate: 0
+ diff: true
+ register: output_6
+ ignore_errors: true
+
+- name: assert ca_force_rotate changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - output_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output_1.msg"
+ - "'Minimum version required is 2.6.0 ' in output_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## dispatcher_heartbeat_period #####################################
+####################################################################
+
+- name: dispatcher_heartbeat_period (check mode)
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 10
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: dispatcher_heartbeat_period
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 10
+ diff: true
+ register: output_2
+
+- name: dispatcher_heartbeat_period (idempotent)
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 10
+ diff: true
+ register: output_3
+
+- name: dispatcher_heartbeat_period (idempotent, check mode)
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 10
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: dispatcher_heartbeat_period (change, check mode)
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 23
+ check_mode: true
+ diff: true
+ register: output_5
+
+- name: dispatcher_heartbeat_period (change)
+ docker_swarm:
+ state: present
+ dispatcher_heartbeat_period: 23
+ diff: true
+ register: output_6
+
+- name: assert dispatcher_heartbeat_period changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## election_tick ###################################################
+####################################################################
+
+- name: election_tick (check mode)
+ docker_swarm:
+ state: present
+ election_tick: 20
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: election_tick
+ docker_swarm:
+ state: present
+ election_tick: 20
+ diff: true
+ register: output_2
+
+- name: election_tick (idempotent)
+ docker_swarm:
+ state: present
+ election_tick: 20
+ diff: true
+ register: output_3
+
+- name: election_tick (idempotent, check mode)
+ docker_swarm:
+ state: present
+ election_tick: 20
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: election_tick (change, check mode)
+ docker_swarm:
+ state: present
+ election_tick: 5
+ check_mode: true
+ diff: true
+ register: output_5
+
+- name: election_tick (change)
+ docker_swarm:
+ state: present
+ election_tick: 5
+ diff: true
+ register: output_6
+
+- name: assert election_tick changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## heartbeat_tick ##################################################
+####################################################################
+
+- name: heartbeat_tick (check mode)
+ docker_swarm:
+ state: present
+ heartbeat_tick: 2
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: heartbeat_tick
+ docker_swarm:
+ state: present
+ heartbeat_tick: 2
+ diff: true
+ register: output_2
+
+- name: heartbeat_tick (idempotent)
+ docker_swarm:
+ state: present
+ heartbeat_tick: 2
+ diff: true
+ register: output_3
+
+- name: heartbeat_tick (idempotent, check mode)
+ docker_swarm:
+ state: present
+ heartbeat_tick: 2
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: heartbeat_tick (change, check mode)
+ docker_swarm:
+ state: present
+ heartbeat_tick: 3
+ check_mode: true
+ diff: true
+ register: output_5
+
+- name: heartbeat_tick (change)
+ docker_swarm:
+ state: present
+ heartbeat_tick: 3
+ diff: true
+ register: output_6
+
+- name: assert heartbeat_tick changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## keep_old_snapshots ##############################################
+####################################################################
+- name: keep_old_snapshots (check mode)
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 1
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: keep_old_snapshots
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 1
+ diff: true
+ register: output_2
+
+- name: keep_old_snapshots (idempotent)
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 1
+ diff: true
+ register: output_3
+
+- name: keep_old_snapshots (idempotent, check mode)
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 1
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: keep_old_snapshots (change, check mode)
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 2
+ check_mode: true
+ diff: true
+ register: output_5
+
+- name: keep_old_snapshots (change)
+ docker_swarm:
+ state: present
+ keep_old_snapshots: 2
+ diff: true
+ register: output_6
+
+- name: assert keep_old_snapshots changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## labels ##########################################################
+####################################################################
+- name: labels (check mode)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ b: v2
+ check_mode: true
+ diff: true
+ register: output_1
+ ignore_errors: true
+
+- name: labels
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ b: v2
+ diff: true
+ register: output_2
+ ignore_errors: true
+
+- name: labels (idempotent)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ b: v2
+ diff: true
+ register: output_3
+ ignore_errors: true
+
+- name: labels (idempotent, check mode)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ b: v2
+ check_mode: true
+ diff: true
+ register: output_4
+ ignore_errors: true
+
+- name: labels (change, check mode)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ c: v3
+ check_mode: true
+ diff: true
+ register: output_5
+ ignore_errors: true
+
+- name: labels (change)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ c: v3
+ diff: true
+ register: output_6
+ ignore_errors: true
+
+- name: labels (not specifying, check mode)
+ docker_swarm:
+ state: present
+ check_mode: true
+ diff: true
+ register: output_7
+ ignore_errors: true
+
+- name: labels (not specifying)
+ docker_swarm:
+ state: present
+ diff: true
+ register: output_8
+ ignore_errors: true
+
+- name: labels (idempotency, check that labels are still there)
+ docker_swarm:
+ state: present
+ labels:
+ a: v1
+ c: v3
+ diff: true
+ register: output_9
+ ignore_errors: true
+
+- name: labels (empty, check mode)
+ docker_swarm:
+ state: present
+ labels: {}
+ check_mode: true
+ diff: true
+ register: output_10
+ ignore_errors: true
+
+- name: labels (empty)
+ docker_swarm:
+ state: present
+ labels: {}
+ diff: true
+ register: output_11
+ ignore_errors: true
+
+- name: labels (empty, idempotent, check mode)
+ docker_swarm:
+ state: present
+ labels: {}
+ check_mode: true
+ diff: true
+ register: output_12
+ ignore_errors: true
+
+- name: labels (empty, idempotent)
+ docker_swarm:
+ state: present
+ labels: {}
+ diff: true
+ register: output_13
+ ignore_errors: true
+
+- name: assert labels changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+ - 'output_7 is not changed'
+ - 'output_7.actions[0] == "No modification"'
+ - 'output_7.diff.before is defined'
+ - 'output_7.diff.after is defined'
+ - 'output_8 is not changed'
+ - 'output_8.actions[0] == "No modification"'
+ - 'output_8.diff.before is defined'
+ - 'output_8.diff.after is defined'
+ - 'output_9 is not changed'
+ - 'output_9.actions[0] == "No modification"'
+ - 'output_9.diff.before is defined'
+ - 'output_9.diff.after is defined'
+ - 'output_10 is changed'
+ - 'output_10.actions[0] == "Swarm cluster updated"'
+ - 'output_10.diff.before is defined'
+ - 'output_10.diff.after is defined'
+ - 'output_11 is changed'
+ - 'output_11.actions[0] == "Swarm cluster updated"'
+ - 'output_11.diff.before is defined'
+ - 'output_11.diff.after is defined'
+ - 'output_12 is not changed'
+ - 'output_12.actions[0] == "No modification"'
+ - 'output_12.diff.before is defined'
+ - 'output_12.diff.after is defined'
+ - 'output_13 is not changed'
+ - 'output_13.actions[0] == "No modification"'
+ - 'output_13.diff.before is defined'
+ - 'output_13.diff.after is defined'
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - output_1 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output_1.msg"
+ - "'Minimum version required is 2.6.0 ' in output_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## log_entries_for_slow_followers ##################################
+####################################################################
+- name: log_entries_for_slow_followers (check mode)
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 42
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: log_entries_for_slow_followers
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 42
+ diff: true
+ register: output_2
+
+- name: log_entries_for_slow_followers (idempotent)
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 42
+ diff: true
+ register: output_3
+
+- name: log_entries_for_slow_followers (idempotent, check mode)
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 42
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: log_entries_for_slow_followers (change, check mode)
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 23
+ check_mode: true
+ diff: true
+ register: output_5
+
+- name: log_entries_for_slow_followers (change)
+ docker_swarm:
+ state: present
+ log_entries_for_slow_followers: 23
+ diff: true
+ register: output_6
+
+- name: assert log_entries_for_slow_followers changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## name ############################################################
+####################################################################
+- name: name (idempotent, check mode)
+ docker_swarm:
+ state: present
+ name: default
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: name (idempotent)
+ docker_swarm:
+ state: present
+ name: default
+ diff: true
+ register: output_2
+
+# The name 'default' is hardcoded in docker swarm. Trying to change
+# it causes a failure. This might change in the future, so we also
+# accept a change for this test.
+- name: name (change, should fail)
+ docker_swarm:
+ state: present
+ name: foobar
+ diff: true
+ register: output_3
+ ignore_errors: true
+
+- name: assert name changes
+ assert:
+ that:
+ - 'output_1 is not changed'
+ - 'output_1.actions[0] == "No modification"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is not changed'
+ - 'output_2.actions[0] == "No modification"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is failed or output_3 is changed'
+
+####################################################################
+## node_cert_expiry ################################################
+####################################################################
+- name: node_cert_expiry (check mode)
+ docker_swarm:
+ state: present
+ node_cert_expiry: 7896000000000000
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: node_cert_expiry
+ docker_swarm:
+ state: present
+ node_cert_expiry: 7896000000000000
+ diff: true
+ register: output_2
+
+- name: node_cert_expiry (idempotent)
+ docker_swarm:
+ state: present
+ node_cert_expiry: 7896000000000000
+ diff: true
+ register: output_3
+
+- name: node_cert_expiry (idempotent, check mode)
+ docker_swarm:
+ state: present
+ node_cert_expiry: 7896000000000000
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: node_cert_expiry (change, check mode)
+ docker_swarm:
+ state: present
+ node_cert_expiry: 8766000000000000
+ check_mode: true
+ diff: true
+ register: output_5
+
+- name: node_cert_expiry (change)
+ docker_swarm:
+ state: present
+ node_cert_expiry: 8766000000000000
+ diff: true
+ register: output_6
+
+- name: assert node_cert_expiry changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## rotate_manager_token ############################################
+####################################################################
+- name: rotate_manager_token (true, check mode)
+ docker_swarm:
+ state: present
+ rotate_manager_token: true
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: rotate_manager_token (true)
+ docker_swarm:
+ state: present
+ rotate_manager_token: true
+ diff: true
+ register: output_2
+
+- name: rotate_manager_token (false, idempotent)
+ docker_swarm:
+ state: present
+ rotate_manager_token: false
+ diff: true
+ register: output_3
+
+- name: rotate_manager_token (false, check mode)
+ docker_swarm:
+ state: present
+ rotate_manager_token: false
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: assert rotate_manager_token changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+
+####################################################################
+## rotate_worker_token #############################################
+####################################################################
+- name: rotate_worker_token (true, check mode)
+ docker_swarm:
+ state: present
+ rotate_worker_token: true
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: rotate_worker_token (true)
+ docker_swarm:
+ state: present
+ rotate_worker_token: true
+ diff: true
+ register: output_2
+
+- name: rotate_worker_token (false, idempotent)
+ docker_swarm:
+ state: present
+ rotate_worker_token: false
+ diff: true
+ register: output_3
+
+- name: rotate_worker_token (false, check mode)
+ docker_swarm:
+ state: present
+ rotate_worker_token: false
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: assert rotate_worker_token changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+
+####################################################################
+## snapshot_interval ###############################################
+####################################################################
+- name: snapshot_interval (check mode)
+ docker_swarm:
+ state: present
+ snapshot_interval: 12345
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: snapshot_interval
+ docker_swarm:
+ state: present
+ snapshot_interval: 12345
+ diff: true
+ register: output_2
+
+- name: snapshot_interval (idempotent)
+ docker_swarm:
+ state: present
+ snapshot_interval: 12345
+ diff: true
+ register: output_3
+
+- name: snapshot_interval (idempotent, check mode)
+ docker_swarm:
+ state: present
+ snapshot_interval: 12345
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: snapshot_interval (change, check mode)
+ docker_swarm:
+ state: present
+ snapshot_interval: 54321
+ check_mode: true
+ diff: true
+ register: output_5
+
+- name: snapshot_interval (change)
+ docker_swarm:
+ state: present
+ snapshot_interval: 54321
+ diff: true
+ register: output_6
+
+- name: assert snapshot_interval changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+####################################################################
+## task_history_retention_limit ####################################
+####################################################################
+- name: task_history_retention_limit (check mode)
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 23
+ check_mode: true
+ diff: true
+ register: output_1
+
+- name: task_history_retention_limit
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 23
+ diff: true
+ register: output_2
+
+- name: task_history_retention_limit (idempotent)
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 23
+ diff: true
+ register: output_3
+
+- name: task_history_retention_limit (idempotent, check mode)
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 23
+ check_mode: true
+ diff: true
+ register: output_4
+
+- name: task_history_retention_limit (change, check mode)
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 7
+ check_mode: true
+ diff: true
+ register: output_5
+
+- name: task_history_retention_limit (change)
+ docker_swarm:
+ state: present
+ task_history_retention_limit: 7
+ diff: true
+ register: output_6
+
+- name: assert task_history_retention_limit changes
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_1.actions[0] == "Swarm cluster updated"'
+ - 'output_1.diff.before is defined'
+ - 'output_1.diff.after is defined'
+ - 'output_2 is changed'
+ - 'output_2.actions[0] == "Swarm cluster updated"'
+ - 'output_2.diff.before is defined'
+ - 'output_2.diff.after is defined'
+ - 'output_3 is not changed'
+ - 'output_3.actions[0] == "No modification"'
+ - 'output_3.diff.before is defined'
+ - 'output_3.diff.after is defined'
+ - 'output_4 is not changed'
+ - 'output_4.actions[0] == "No modification"'
+ - 'output_4.diff.before is defined'
+ - 'output_4.diff.after is defined'
+ - 'output_5 is changed'
+ - 'output_5.actions[0] == "Swarm cluster updated"'
+ - 'output_5.diff.before is defined'
+ - 'output_5.diff.after is defined'
+ - 'output_6 is changed'
+ - 'output_6.actions[0] == "Swarm cluster updated"'
+ - 'output_6.diff.before is defined'
+ - 'output_6.diff.after is defined'
+
+- include_tasks: cleanup.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml
new file mode 100644
index 00000000..66f422e5
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm/tasks/tests/remote-addr-pool.yml
@@ -0,0 +1,95 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- debug:
+ msg: Running tests/remote-addr-pool.yml
+
+####################################################################
+## default_addr_pool ###############################################
+####################################################################
+
+- name: default_addr_pool
+ docker_swarm:
+ state: present
+ default_addr_pool:
+ - "2.0.0.0/16"
+ diff: true
+ register: output_1
+ ignore_errors: true
+
+- name: default_addr_pool (idempotent)
+ docker_swarm:
+ state: present
+ default_addr_pool:
+ - "2.0.0.0/16"
+ diff: true
+ register: output_2
+ ignore_errors: true
+
+- name: assert default_addr_pool
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_2 is not changed'
+ - 'output_2.swarm_facts.DefaultAddrPool == ["2.0.0.0/16"]'
+ when:
+ - docker_api_version is version('1.39', '>=')
+ - docker_py_version is version('4.0.0', '>=')
+
+- name: assert default_addr_pool failed when unsupported
+ assert:
+ that:
+ - 'output_1 is failed'
+ - "'Minimum version required' in output_1.msg"
+ when: docker_api_version is version('1.39', '<') or
+ docker_py_version is version('4.0.0', '<')
+
+####################################################################
+## subnet_size #####################################################
+####################################################################
+- name: Leave swarm
+ docker_swarm:
+ state: absent
+ force: true
+ default_addr_pool:
+ - "2.0.0.0/16"
+ diff: true
+
+- name: subnet_size
+ docker_swarm:
+ state: present
+ force: true
+ subnet_size: 26
+ diff: true
+ register: output_1
+ ignore_errors: true
+
+- name: subnet_size (idempotent)
+ docker_swarm:
+ state: present
+ subnet_size: 26
+ diff: true
+ register: output_2
+ ignore_errors: true
+
+- name: assert subnet_size
+ assert:
+ that:
+ - 'output_1 is changed'
+ - 'output_2 is not changed'
+ - 'output_2.swarm_facts.SubnetSize == 26'
+ when:
+ - docker_api_version is version('1.39', '>=')
+ - docker_py_version is version('4.0.0', '>=')
+
+- name: assert subnet_size failed when unsupported
+ assert:
+ that:
+ - output_1 is failed
+ - "'Minimum version required' in output_1.msg"
+ when: docker_api_version is version('1.39', '<') or
+ docker_py_version is version('4.0.0', '<')
+
+- include_tasks: cleanup.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/aliases
new file mode 100644
index 00000000..6f61c620
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/1
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/main.yml
new file mode 100644
index 00000000..b24184db
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_swarm_info.yml
+ when: docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_swarm_info tests!"
+ when: not(docker_py_version is version('1.10.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml
new file mode 100644
index 00000000..288e2a0b
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_info/tasks/test_swarm_info.yml
@@ -0,0 +1,194 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Try to get docker_swarm_info when docker is not running in swarm mode
+ docker_swarm_info:
+ ignore_errors: true
+ register: output
+
+ - name: assert failure when called when swarm is not in use or not run on manager node
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "Error running docker swarm module: must run on swarm manager node"'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == false'
+ - 'output.docker_swarm_manager == false'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ register: output
+
+ - name: assert changed when create a new swarm cluster
+ assert:
+ that:
+ - 'output is changed'
+ - 'output.actions[0] | regex_search("New Swarm cluster created: ")'
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+
+ - name: Try to get docker_swarm_info when docker is running in swarm mode and as manager
+ docker_swarm_info:
+ register: output
+
+ - name: assert reading docker swarm facts
+ assert:
+ that:
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+ - 'output.swarm_facts.ID'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Try to get docker_swarm_info and list of nodes when docker is running in swarm mode and as manager
+ docker_swarm_info:
+ nodes: true
+ register: output
+
+ - name: assert reading swarm facts with list of nodes option
+ assert:
+ that:
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+ - 'output.swarm_facts.ID'
+ - 'output.nodes[0].ID is string'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Get local docker node name
+ set_fact:
+ localnodename: "{{ output.nodes[0].Hostname }}"
+
+
+ - name: Try to get docker_swarm_info and verbose list of nodes when docker is running in swarm mode and as manager
+ docker_swarm_info:
+ nodes: true
+ verbose_output: true
+ register: output
+
+ - name: assert reading swarm facts with list of nodes and verbose output options
+ assert:
+ that:
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+ - 'output.swarm_facts.ID'
+ - 'output.nodes[0].ID is string'
+ - 'output.nodes[0].CreatedAt'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Try to get docker_swarm_info and list of nodes with filters providing existing node name
+ docker_swarm_info:
+ nodes: true
+ nodes_filters:
+ name: "{{ localnodename }}"
+ register: output
+
+ - name: assert reading swarm facts and using node filter (existing node name)
+ assert:
+ that:
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+ - 'output.swarm_facts.ID'
+ - 'output.nodes | length == 1'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Create random name
+ set_fact:
+ randomnodename: "{{ 'node-%0x' % ((2**32) | random) }}"
+
+ - name: Try to get docker_swarm_info and list of nodes with filters providing non-existing random node name
+ docker_swarm_info:
+ nodes: true
+ nodes_filters:
+ name: "{{ randomnodename }}"
+ register: output
+
+ - name: assert reading swarm facts and using node filter (random node name)
+ assert:
+ that:
+ - 'output.swarm_facts.JoinTokens.Manager'
+ - 'output.swarm_facts.JoinTokens.Worker'
+ - 'output.swarm_facts.ID'
+ - 'output.nodes | length == 0'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ - 'output.swarm_unlock_key is not defined'
+
+ - name: Try to get docker_swarm_info and swarm_unlock_key on a swarm that is not locked
+ docker_swarm_info:
+ unlock_key: true
+ register: output
+ ignore_errors: true
+
+ - name: assert reading swarm facts and non existing swarm unlock key
+ assert:
+ that:
+ - 'output.swarm_unlock_key is none'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ when: docker_py_version is version('2.7.0', '>=')
+ - assert:
+ that:
+ - output is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output.msg"
+ - "'Minimum version required is 2.7.0 ' in output.msg"
+ when: docker_py_version is version('2.7.0', '<')
+
+ - name: Update swarm cluster to be locked
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ autolock_managers: true
+ register: autolock_managers_update_output
+ ignore_errors: true
+
+ - name: Try to get docker_swarm_info and swarm_unlock_key
+ docker_swarm_info:
+ unlock_key: true
+ register: output
+ ignore_errors: true
+
+ - name: assert reading swarm facts and swarm unlock key
+ assert:
+ that:
+ - 'output.swarm_unlock_key is string'
+ - 'output.swarm_unlock_key == autolock_managers_update_output.swarm_facts.UnlockKey'
+ - 'output.can_talk_to_docker == true'
+ - 'output.docker_swarm_active == true'
+ - 'output.docker_swarm_manager == true'
+ when: docker_py_version is version('2.7.0', '>=')
+ - assert:
+ that:
+ - output is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in output.msg"
+ - "'Minimum version required is 2.7.0 ' in output.msg"
+ when: docker_py_version is version('2.7.0', '<')
+
+ always:
+ - name: Cleanup
+ docker_swarm:
+ state: absent
+ force: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/aliases
new file mode 100644
index 00000000..fc581d54
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/3
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-1 b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-1
new file mode 100644
index 00000000..87bc9dec
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-1
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+TEST3=val3
+TEST4=val4
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-2 b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-2
new file mode 100644
index 00000000..7f36b44a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/files/env-file-2
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+TEST3=val5
+TEST5=val5
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/main.yml
new file mode 100644
index 00000000..5a5795b5
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/main.yml
@@ -0,0 +1,83 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+
+# Create random name prefix (for containers, networks, ...)
+- name: Create random name prefix
+ set_fact:
+ name_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+ service_names: []
+ network_names: []
+ config_names: []
+ secret_names: []
+ volume_names: []
+
+- debug:
+ msg: "Using container name prefix {{ name_prefix }}"
+
+# Run the tests
+- block:
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: Make sure all services are removed
+ docker_swarm_service:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ service_names }}"
+ ignore_errors: true
+
+ - name: Make sure all networks are removed
+ docker_network:
+ name: "{{ item }}"
+ state: absent
+ force: true
+ loop: "{{ network_names }}"
+ ignore_errors: true
+
+ - name: Make sure all configs are removed
+ docker_config:
+ name: "{{ item }}"
+ state: absent
+ force: true
+ loop: "{{ config_names }}"
+ ignore_errors: true
+
+ - name: Make sure all volumes are removed
+ docker_volume:
+ name: "{{ item }}"
+ state: absent
+ loop: "{{ volume_names }}"
+ ignore_errors: true
+
+ - name: Make sure all secrets are removed
+ docker_secret:
+ name: "{{ item }}"
+ state: absent
+ force: true
+ loop: "{{ secret_names }}"
+ ignore_errors: true
+
+ - name: Make sure swarm is removed
+ docker_swarm:
+ state: absent
+ force: true
+ ignore_errors: true
+ when: docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_swarm_service tests!"
+ when: not(docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/run-test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/run-test.yml
new file mode 100644
index 00000000..65853ddd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/run-test.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml
new file mode 100644
index 00000000..9f2fa8c3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/configs.yml
@@ -0,0 +1,463 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-configs' }}"
+ config_name_1: "{{ name_prefix ~ '-configs-1' }}"
+ config_name_2: "{{ name_prefix ~ '-configs-2' }}"
+ config_name_3: "{{ name_prefix ~ '-configs-3' }}"
+
+- name: Registering container name
+ set_fact:
+ config_names: "{{ config_names + [config_name_1, config_name_2] }}"
+
+- docker_config:
+ name: "{{ config_name_1 }}"
+ data: "hello"
+ state: present
+ register: "config_result_1"
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+- docker_config:
+ name: "{{ config_name_2 }}"
+ data: "test"
+ state: present
+ register: "config_result_2"
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+- docker_config:
+ name: "{{ config_name_3 }}"
+ data: "config3"
+ state: present
+ rolling_versions: true
+ register: "config_result_3"
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+####################################################################
+## configs #########################################################
+####################################################################
+
+- name: configs
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ register: configs_1
+ ignore_errors: true
+
+- name: configs (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ register: configs_2
+ ignore_errors: true
+
+- name: configs (add)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ - config_name: "{{ config_name_2 }}"
+ filename: "/tmp/{{ config_name_2 }}.txt"
+ register: configs_3
+ ignore_errors: true
+
+- name: configs (add idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ - config_id: "{{ config_result_2.config_id|default('') }}"
+ config_name: "{{ config_name_2 }}"
+ filename: "/tmp/{{ config_name_2 }}.txt"
+ register: configs_4
+ ignore_errors: true
+
+- name: configs (add idempotency no id)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ - config_name: "{{ config_name_2 }}"
+ filename: "/tmp/{{ config_name_2 }}.txt"
+ register: configs_5
+ ignore_errors: true
+
+- name: configs (add idempotency no id and re-ordered)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_name: "{{ config_name_2 }}"
+ filename: "/tmp/{{ config_name_2 }}.txt"
+ - config_name: "{{ config_name_1 }}"
+ filename: "/tmp/{{ config_name_1 }}.txt"
+ register: configs_6
+ ignore_errors: true
+
+- name: configs (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs: []
+ register: configs_7
+ ignore_errors: true
+
+- name: configs (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs: []
+ register: configs_8
+ ignore_errors: true
+
+- name: rolling configs
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_name: "{{ config_name_3 }}_v1"
+ filename: "/run/configs/{{ config_name_3 }}.txt"
+ register: configs_9
+ ignore_errors: true
+
+- name: update rolling config
+ docker_config:
+ name: "{{ config_name_3 }}"
+ data: "newconfig3"
+ state: "present"
+ rolling_versions: true
+ register: configs_10
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+ ignore_errors: true
+
+- name: rolling configs service update
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_name: "{{ config_name_3 }}_v2"
+ filename: "/run/configs/{{ config_name_3 }}.txt"
+ register: configs_11
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - configs_1 is changed
+ - configs_2 is not changed
+ - configs_3 is changed
+ - configs_4 is not changed
+ - configs_5 is not changed
+ - configs_6 is not changed
+ - configs_7 is changed
+ - configs_8 is not changed
+ - configs_9 is changed
+ - configs_10 is not failed
+ - configs_11 is changed
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+- assert:
+ that:
+ - configs_1 is failed
+ - "'Minimum version required' in configs_1.msg"
+ when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## configs (uid) ###################################################
+####################################################################
+
+- name: configs (uid int)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ uid: 1000
+ register: configs_1
+ ignore_errors: true
+
+- name: configs (uid int idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ uid: 1000
+ register: configs_2
+ ignore_errors: true
+
+- name: configs (uid int change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ uid: 1002
+ register: configs_3
+ ignore_errors: true
+
+- name: configs (uid str)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ uid: "1001"
+ register: configs_4
+ ignore_errors: true
+
+- name: configs (uid str idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ uid: "1001"
+ register: configs_5
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+- assert:
+ that:
+ - configs_1 is changed
+ - configs_2 is not changed
+ - configs_3 is changed
+ - configs_4 is changed
+ - configs_5 is not changed
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+- assert:
+ that:
+ - configs_1 is failed
+ - "'Minimum version required' in configs_1.msg"
+ when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<')
+
+
+####################################################################
+## configs (gid) ###################################################
+####################################################################
+
+- name: configs (gid int)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ gid: 1000
+ register: configs_1
+ ignore_errors: true
+
+- name: configs (gid int idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ gid: 1000
+ register: configs_2
+ ignore_errors: true
+
+- name: configs (gid int change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ gid: 1002
+ register: configs_3
+ ignore_errors: true
+
+- name: configs (gid str)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ gid: "1001"
+ register: configs_4
+ ignore_errors: true
+
+- name: configs (gid str idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ gid: "1001"
+ register: configs_5
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+- assert:
+ that:
+ - configs_1 is changed
+ - configs_2 is not changed
+ - configs_3 is changed
+ - configs_4 is changed
+ - configs_5 is not changed
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+- assert:
+ that:
+ - configs_1 is failed
+ - "'Minimum version required' in configs_1.msg"
+ when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## configs (mode) ##################################################
+####################################################################
+
+- name: configs (mode)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ mode: 0600
+ register: configs_1
+ ignore_errors: true
+
+- name: configs (mode idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ mode: 0600
+ register: configs_2
+ ignore_errors: true
+
+- name: configs (mode change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ configs:
+ - config_id: "{{ config_result_1.config_id|default('') }}"
+ config_name: "{{ config_name_1 }}"
+ mode: 0777
+ register: configs_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+- assert:
+ that:
+ - configs_1 is changed
+ - configs_2 is not changed
+ - configs_3 is changed
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
+
+- assert:
+ that:
+ - configs_1 is failed
+ - "'Minimum version required' in configs_1.msg"
+ when: docker_api_version is version('1.30', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+####################################################################
+####################################################################
+
+- name: Delete configs
+ docker_config:
+ name: "{{ config_name }}"
+ state: absent
+ force: true
+ loop:
+ - "{{ config_name_1 }}"
+ - "{{ config_name_2 }}"
+ - "{{ config_name_3 }}"
+ loop_control:
+ loop_var: config_name
+ ignore_errors: true
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('2.6.0', '>=')
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml
new file mode 100644
index 00000000..22947fbd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/logging.yml
@@ -0,0 +1,138 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-logging' }}"
+
+- name: Registering service names
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+####################################################################
+## logging.driver ##################################################
+####################################################################
+
+- name: logging.driver
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ register: logging_driver_1
+
+- name: logging.driver (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ register: logging_driver_2
+
+- name: logging.driver (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: syslog
+ register: logging_driver_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - logging_driver_1 is changed
+ - logging_driver_2 is not changed
+ - logging_driver_3 is changed
+
+####################################################################
+## logging.options #################################################
+####################################################################
+
+- name: logging_options
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ options:
+ labels: production_status
+ env: os,customer
+ register: logging_options_1
+
+- name: logging_options (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ options:
+ env: os,customer
+ labels: production_status
+ register: logging_options_2
+
+- name: logging_options (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ options:
+ env: os,customer
+ labels: production_status
+ max-file: "1"
+ register: logging_options_3
+
+- name: logging_options (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ options: {}
+ register: logging_options_4
+
+- name: logging_options (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ logging:
+ driver: json-file
+ options: {}
+ register: logging_options_5
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - logging_options_1 is changed
+ - logging_options_2 is not changed
+ - logging_options_3 is changed
+ - logging_options_4 is changed
+ - logging_options_5 is not changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml
new file mode 100644
index 00000000..a1e185e1
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/misc.yml
@@ -0,0 +1,117 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- block:
+ - name: Create a swarm service without name
+ register: output
+ docker_swarm_service:
+ state: present
+ ignore_errors: true
+
+ - name: assert failure when name not set
+ assert:
+ that:
+ - output is failed
+ - 'output.msg == "missing required arguments: name"'
+
+ - name: Remove an non-existing service
+ register: output
+ docker_swarm_service:
+ state: absent
+ name: non_existing_service
+
+ - name: assert output not changed when deleting non-existing service
+ assert:
+ that:
+ - output is not changed
+
+ - name: create sample service
+ register: output
+ docker_swarm_service:
+ name: test_service
+ endpoint_mode: dnsrr
+ image: "{{ docker_test_image_busybox }}"
+ resolve_image: false
+ args:
+ - sleep
+ - "3600"
+
+ - name: assert sample service is created
+ assert:
+ that:
+ - output is changed
+
+ - name: change service args
+ register: output
+ docker_swarm_service:
+ name: test_service
+ image: "{{ docker_test_image_busybox }}"
+ resolve_image: false
+ args:
+ - sleep
+ - "1800"
+
+ - name: assert service args are correct
+ assert:
+ that:
+ - output.swarm_service.args == ['sleep', '1800']
+
+ - name: set service mode to global
+ register: output
+ docker_swarm_service:
+ name: test_service
+ image: "{{ docker_test_image_busybox }}"
+ resolve_image: false
+ endpoint_mode: vip
+ mode: global
+ args:
+ - sleep
+ - "1800"
+
+ - name: assert service mode changed caused service rebuild
+ assert:
+ that:
+ - output.rebuilt
+
+ - name: add published ports to service
+ register: output
+ docker_swarm_service:
+ name: test_service
+ image: "{{ docker_test_image_busybox }}"
+ resolve_image: false
+ mode: global
+ args:
+ - sleep
+ - "1800"
+ endpoint_mode: vip
+ publish:
+ - protocol: tcp
+ published_port: 60001
+ target_port: 60001
+ - protocol: udp
+ published_port: 60001
+ target_port: 60001
+
+ - name: fake image key as it is not predictable
+ set_fact:
+ ansible_docker_service_output: "{{ output.swarm_service|combine({'image': docker_test_image_busybox}) }}"
+
+ - name: assert service matches expectations
+ assert:
+ that:
+ - ansible_docker_service_output == service_expected_output
+
+ - name: delete sample service
+ register: output
+ docker_swarm_service:
+ name: test_service
+ state: absent
+
+ - name: assert service deletion returns changed
+ assert:
+ that:
+ - output is success
+ - output is changed
+ when: docker_api_version is version('1.25', '>=') and docker_py_version is version('3.0.0', '>=')
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml
new file mode 100644
index 00000000..7605d9fc
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/mounts.yml
@@ -0,0 +1,606 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-mounts' }}"
+ volume_name_1: "{{ name_prefix ~ '-volume-1' }}"
+ volume_name_2: "{{ name_prefix ~ '-volume-2' }}"
+
+- name: Registering service and volume names
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+ volume_names: "{{ volume_names + [volume_name_1, volume_name_2] }}"
+
+- docker_volume:
+ name: "{{ volume_name }}"
+ state: present
+ loop:
+ - "{{ volume_name_1 }}"
+ - "{{ volume_name_2 }}"
+ loop_control:
+ loop_var: volume_name
+
+####################################################################
+## mounts ##########################################################
+####################################################################
+
+- name: mounts
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ register: mounts_1
+
+- name: mounts (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ register: mounts_2
+
+- name: mounts (add)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ - source: "/tmp/"
+ target: "/tmp/{{ volume_name_2 }}"
+ type: "bind"
+ register: mounts_3
+
+- name: mounts (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "/tmp/"
+ target: "/tmp/{{ volume_name_2 }}"
+ type: "bind"
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ register: mounts_4
+
+- name: mounts (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts: []
+ register: mounts_5
+
+- name: mounts (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts: []
+ register: mounts_6
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - mounts_1 is changed
+ - mounts_2 is not changed
+ - mounts_3 is changed
+ - mounts_4 is not changed
+ - mounts_5 is changed
+ - mounts_6 is not changed
+
+####################################################################
+## mounts.readonly #################################################
+####################################################################
+
+- name: mounts.readonly
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ readonly: true
+ register: mounts_readonly_1
+
+
+- name: mounts.readonly (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ readonly: true
+ register: mounts_readonly_2
+
+- name: mounts.readonly (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ readonly: false
+ register: mounts_readonly_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - mounts_readonly_1 is changed
+ - mounts_readonly_2 is not changed
+ - mounts_readonly_3 is changed
+
+####################################################################
+## mounts.propagation ##############################################
+####################################################################
+
+- name: mounts.propagation
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "/tmp"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "bind"
+ propagation: "slave"
+ register: mounts_propagation_1
+
+
+- name: mounts.propagation (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "/tmp"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "bind"
+ propagation: "slave"
+ register: mounts_propagation_2
+
+- name: mounts.propagation (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "/tmp"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "bind"
+ propagation: "rprivate"
+ register: mounts_propagation_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - mounts_propagation_1 is changed
+ - mounts_propagation_2 is not changed
+ - mounts_propagation_3 is changed
+
+####################################################################
+## mounts.labels ##################################################
+####################################################################
+
+- name: mounts.labels
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ labels:
+ mylabel: hello-world
+ my-other-label: hello-mars
+ register: mounts_labels_1
+
+
+- name: mounts.labels (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ labels:
+ mylabel: hello-world
+ my-other-label: hello-mars
+ register: mounts_labels_2
+
+- name: mounts.labels (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ labels:
+ mylabel: hello-world
+ register: mounts_labels_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - mounts_labels_1 is changed
+ - mounts_labels_2 is not changed
+ - mounts_labels_3 is changed
+
+####################################################################
+## mounts.no_copy ##################################################
+####################################################################
+
+- name: mounts.no_copy
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ no_copy: true
+ register: mounts_no_copy_1
+
+
+- name: mounts.no_copy (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ no_copy: true
+ register: mounts_no_copy_2
+
+- name: mounts.no_copy (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ no_copy: false
+ register: mounts_no_copy_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - mounts_no_copy_1 is changed
+ - mounts_no_copy_2 is not changed
+ - mounts_no_copy_3 is changed
+
+####################################################################
+## mounts.driver_config ############################################
+####################################################################
+
+- name: mounts.driver_config
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ driver_config:
+ name: "nfs"
+ options:
+ addr: "127.0.0.1"
+ register: mounts_driver_config_1
+
+- name: mounts.driver_config (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ driver_config:
+ name: "nfs"
+ options:
+ addr: "127.0.0.1"
+ register: mounts_driver_config_2
+
+- name: mounts.driver_config (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "volume"
+ driver_config:
+ name: "local"
+ register: mounts_driver_config_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - mounts_driver_config_1 is changed
+ - mounts_driver_config_2 is not changed
+ - mounts_driver_config_3 is changed
+
+####################################################################
+## mounts.tmpfs_size ###############################################
+####################################################################
+
+- name: mounts.tmpfs_size
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_size: "50M"
+ register: mounts_tmpfs_size_1
+ ignore_errors: true
+
+- name: mounts.tmpfs_size (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_size: "50M"
+ register: mounts_tmpfs_size_2
+ ignore_errors: true
+
+- name: mounts.tmpfs_size (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_size: "25M"
+ register: mounts_tmpfs_size_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - mounts_tmpfs_size_1 is changed
+ - mounts_tmpfs_size_2 is not changed
+ - mounts_tmpfs_size_3 is changed
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - mounts_tmpfs_size_1 is failed
+ - "'Minimum version required' in mounts_tmpfs_size_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## mounts.tmpfs_mode ###############################################
+####################################################################
+
+- name: mounts.tmpfs_mode
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_mode: 0444
+ register: mounts_tmpfs_mode_1
+ ignore_errors: true
+
+- name: mounts.tmpfs_mode (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_mode: 0444
+ register: mounts_tmpfs_mode_2
+ ignore_errors: true
+
+- name: mounts.tmpfs_mode (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: "{{ volume_name_1 }}"
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ tmpfs_mode: 0777
+ register: mounts_tmpfs_mode_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - mounts_tmpfs_mode_1 is changed
+ - mounts_tmpfs_mode_2 is not changed
+ - mounts_tmpfs_mode_3 is changed
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+      - mounts_tmpfs_mode_1 is failed
+      - "'Minimum version required' in mounts_tmpfs_mode_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## mounts.source ###################################################
+####################################################################
+
+- name: mounts.source (empty for tmpfs)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: ""
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ register: mounts_tmpfs_source_1
+ ignore_errors: true
+
+- name: mounts.source (empty for tmpfs idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - source: ""
+ target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ register: mounts_tmpfs_source_2
+ ignore_errors: true
+
+- name: mounts.source (not specified for tmpfs idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mounts:
+ - target: "/tmp/{{ volume_name_1 }}"
+ type: "tmpfs"
+ register: mounts_tmpfs_source_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - mounts_tmpfs_source_1 is changed
+ - mounts_tmpfs_source_2 is not changed
+ - mounts_tmpfs_source_3 is not changed
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - mounts_tmpfs_source_1 is failed
+ - "'Minimum version required' in mounts_tmpfs_source_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+####################################################################
+####################################################################
+
+- name: Delete volumes
+ docker_volume:
+ name: "{{ volume_name }}"
+ state: absent
+ loop:
+ - "{{ volume_name_1 }}"
+ - "{{ volume_name_2 }}"
+ loop_control:
+ loop_var: volume_name
+ ignore_errors: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml
new file mode 100644
index 00000000..f57824f9
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/networks.yml
@@ -0,0 +1,453 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-networks' }}"
+ network_name_1: "{{ name_prefix ~ '-network-1' }}"
+ network_name_2: "{{ name_prefix ~ '-network-2' }}"
+
+- name: Registering service and network names
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+ network_names: "{{ network_names + [network_name_1, network_name_2] }}"
+
+- docker_network:
+ name: "{{ network_name }}"
+ driver: "overlay"
+ state: present
+ loop:
+ - "{{ network_name_1 }}"
+ - "{{ network_name_2 }}"
+ loop_control:
+ loop_var: network_name
+
+#####################################################################
+## networks #########################################################
+#####################################################################
+
+- name: networks
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_1 }}"
+ register: networks_1
+
+- name: networks (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_1 }}"
+ register: networks_2
+
+- name: networks (dict idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ register: networks_3
+
+- name: networks (change more)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_1 }}"
+ - "{{ network_name_2 }}"
+ register: networks_4
+
+- name: networks (change more idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_1 }}"
+ - "{{ network_name_2 }}"
+ register: networks_5
+
+- name: networks (change more dict idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ - name: "{{ network_name_2 }}"
+ register: networks_6
+
+- name: networks (change more mixed idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ - "{{ network_name_2 }}"
+ register: networks_7
+
+- name: networks (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_2 }}"
+ - name: "{{ network_name_1 }}"
+ register: networks_8
+
+- name: networks (change less)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_2 }}"
+ register: networks_9
+
+- name: networks (change less idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "{{ network_name_2 }}"
+ register: networks_10
+
+- name: networks (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks: []
+ register: networks_11
+
+- name: networks (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks: []
+ register: networks_12
+
+- name: networks (unknown network)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - "idonotexist"
+ register: networks_13
+ ignore_errors: true
+
+- name: networks (missing dict key name)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - foo: "bar"
+ register: networks_14
+ ignore_errors: true
+
+- name: networks (invalid list type)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - [1, 2, 3]
+ register: networks_15
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - networks_1 is changed
+ - networks_2 is not changed
+ - networks_3 is not changed
+ - networks_4 is changed
+ - networks_5 is not changed
+ - networks_6 is not changed
+ - networks_7 is not changed
+ - networks_8 is not changed
+ - networks_9 is changed
+ - networks_10 is not changed
+ - networks_11 is changed
+ - networks_12 is not changed
+ - networks_13 is failed
+ - '"Could not find a network named: ''idonotexist''" in networks_13.msg'
+ - networks_14 is failed
+ - "'\"name\" is required when networks are passed as dictionaries.' in networks_14.msg"
+ - networks_15 is failed
+ - "'Only a list of strings or dictionaries are allowed to be passed as networks' in networks_15.msg"
+
+- assert:
+ that:
+ - networks_4.rebuilt == false
+ - networks_7.rebuilt == false
+ when: docker_api_version is version('1.29', '>=') and docker_py_version is version('2.7.0', '>=')
+
+- assert:
+ that:
+ - networks_4.rebuilt == true
+ - networks_7.rebuilt == true
+ when: docker_api_version is version('1.29', '<') or docker_py_version is version('2.7.0', '<')
+
+####################################################################
+## networks.aliases ################################################
+####################################################################
+
+- name: networks.aliases
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases:
+ - "alias1"
+ - "alias2"
+ register: networks_aliases_1
+
+- name: networks.aliases (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases:
+ - "alias1"
+ - "alias2"
+ register: networks_aliases_2
+
+- name: networks.aliases (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases:
+ - "alias2"
+ - "alias1"
+ register: networks_aliases_3
+
+- name: networks.aliases (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases:
+ - "alias1"
+ register: networks_aliases_4
+
+- name: networks.aliases (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases: []
+ register: networks_aliases_5
+
+- name: networks.aliases (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases: []
+ register: networks_aliases_6
+
+- name: networks.aliases (invalid type)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ aliases:
+ - [1, 2, 3]
+ register: networks_aliases_7
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - networks_aliases_1 is changed
+ - networks_aliases_2 is not changed
+ - networks_aliases_3 is not changed
+ - networks_aliases_4 is changed
+ - networks_aliases_5 is changed
+ - networks_aliases_6 is not changed
+ - networks_aliases_7 is failed
+ - "'Only strings are allowed as network aliases' in networks_aliases_7.msg"
+
+####################################################################
+## networks.options ################################################
+####################################################################
+
+- name: networks.options
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options:
+ foo: bar
+ test: hello
+ register: networks_options_1
+
+- name: networks.options (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options:
+ foo: bar
+ test: hello
+ register: networks_options_2
+
+- name: networks.options (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options:
+ foo: bar
+ test: hej
+ register: networks_options_3
+
+- name: networks.options (change less)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options:
+ foo: bar
+ register: networks_options_4
+
+- name: networks.options (invalid type)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options: [1, 2, 3]
+ register: networks_options_5
+ ignore_errors: true
+
+- name: networks.options (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options: {}
+ register: networks_options_6
+
+- name: networks.options (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ networks:
+ - name: "{{ network_name_1 }}"
+ options: {}
+ register: networks_options_7
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - networks_options_1 is changed
+ - networks_options_2 is not changed
+ - networks_options_3 is changed
+ - networks_options_4 is changed
+ - networks_options_5 is failed
+ - "'Only dict is allowed as network options' in networks_options_5.msg"
+ - networks_options_6 is changed
+ - networks_options_7 is not changed
+
+####################################################################
+####################################################################
+####################################################################
+
+- name: Delete networks
+ docker_network:
+ name: "{{ network_name }}"
+ state: absent
+ force: true
+ loop:
+ - "{{ network_name_1 }}"
+ - "{{ network_name_2 }}"
+ loop_control:
+ loop_var: network_name
+ ignore_errors: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/options.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/options.yml
new file mode 100644
index 00000000..4cc6a574
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/options.yml
@@ -0,0 +1,2005 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-options' }}"
+
+- name: Registering service names
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+####################################################################
+## args ############################################################
+####################################################################
+
+- name: args
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ args:
+ - sleep
+ - "3600"
+ register: args_1
+
+- name: args (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ args:
+ - sleep
+ - "3600"
+ register: args_2
+
+- name: args (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ args:
+ - sleep
+ - "3400"
+ register: args_3
+
+- name: args (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ args: []
+ register: args_4
+
+- name: args (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ args: []
+ register: args_5
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - args_1 is changed
+ - args_2 is not changed
+ - args_3 is changed
+ - args_4 is changed
+ - args_5 is not changed
+
+####################################################################
+## command #########################################################
+####################################################################
+
+- name: command
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ register: command_1
+
+- name: command (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ register: command_2
+
+- name: command (less parameters)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -c "sleep 10m"'
+ register: command_3
+
+- name: command (as list)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command:
+ - "/bin/sh"
+ - "-c"
+ - "sleep 10m"
+ register: command_4
+
+- name: command (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: []
+ register: command_5
+
+- name: command (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: []
+ register: command_6
+
+- name: command (string failure)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: true
+ register: command_7
+ ignore_errors: true
+
+- name: command (list failure)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command:
+ - "/bin/sh"
+ - yes
+ register: command_8
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - command_1 is changed
+ - command_2 is not changed
+ - command_3 is changed
+ - command_4 is not changed
+ - command_5 is changed
+ - command_6 is not changed
+ - command_7 is failed
+ - command_8 is failed
+
+####################################################################
+## container_labels ################################################
+####################################################################
+
+- name: container_labels
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ container_labels:
+ test_1: "1"
+ test_2: "2"
+ register: container_labels_1
+
+- name: container_labels (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ container_labels:
+ test_1: "1"
+ test_2: "2"
+ register: container_labels_2
+
+- name: container_labels (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ container_labels:
+ test_1: "1"
+ test_2: "3"
+ register: container_labels_3
+
+- name: container_labels (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ container_labels: {}
+ register: container_labels_4
+
+- name: container_labels (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ container_labels: {}
+ register: container_labels_5
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - container_labels_1 is changed
+ - container_labels_2 is not changed
+ - container_labels_3 is changed
+ - container_labels_4 is changed
+ - container_labels_5 is not changed
+
+####################################################################
+## dns #############################################################
+####################################################################
+
+- name: dns
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns:
+ - 1.1.1.1
+ - 8.8.8.8
+ register: dns_1
+ ignore_errors: true
+
+- name: dns (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns:
+ - 1.1.1.1
+ - 8.8.8.8
+ register: dns_2
+ ignore_errors: true
+
+- name: dns (changed order)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns:
+ - 8.8.8.8
+ - 1.1.1.1
+ register: dns_3
+ ignore_errors: true
+
+- name: dns (changed elements)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns:
+ - 8.8.8.8
+ - 9.9.9.9
+ register: dns_4
+ ignore_errors: true
+
+- name: dns (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns: []
+ register: dns_5
+ ignore_errors: true
+
+- name: dns (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns: []
+ register: dns_6
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - dns_1 is changed
+ - dns_2 is not changed
+ - dns_3 is changed
+ - dns_4 is changed
+ - dns_5 is changed
+ - dns_6 is not changed
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - dns_1 is failed
+ - "'Minimum version required' in dns_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## dns_options #####################################################
+####################################################################
+
+- name: dns_options
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options:
+ - "timeout:10"
+ - rotate
+ register: dns_options_1
+ ignore_errors: true
+
+- name: dns_options (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options:
+ - "timeout:10"
+ - rotate
+ register: dns_options_2
+ ignore_errors: true
+
+- name: dns_options (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options:
+ - "timeout:10"
+ - no-check-names
+ register: dns_options_3
+ ignore_errors: true
+
+- name: dns_options (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options:
+ - no-check-names
+ - "timeout:10"
+ register: dns_options_4
+ ignore_errors: true
+
+- name: dns_options (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options: []
+ register: dns_options_5
+ ignore_errors: true
+
+- name: dns_options (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_options: []
+ register: dns_options_6
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - dns_options_1 is changed
+ - dns_options_2 is not changed
+ - dns_options_3 is changed
+ - dns_options_4 is not changed
+ - dns_options_5 is changed
+ - dns_options_6 is not changed
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - dns_options_1 is failed
+ - "'Minimum version required' in dns_options_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## dns_search ######################################################
+####################################################################
+
+- name: dns_search
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search:
+ - example.com
+ - example.org
+ register: dns_search_1
+ ignore_errors: true
+
+- name: dns_search (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search:
+ - example.com
+ - example.org
+ register: dns_search_2
+ ignore_errors: true
+
+- name: dns_search (different order)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search:
+ - example.org
+ - example.com
+ register: dns_search_3
+ ignore_errors: true
+
+- name: dns_search (changed elements)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search:
+ - ansible.com
+ - example.com
+ register: dns_search_4
+ ignore_errors: true
+
+- name: dns_search (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search: []
+ register: dns_search_5
+ ignore_errors: true
+
+- name: dns_search (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ dns_search: []
+ register: dns_search_6
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - dns_search_1 is changed
+ - dns_search_2 is not changed
+ - dns_search_3 is changed
+ - dns_search_4 is changed
+ - dns_search_5 is changed
+ - dns_search_6 is not changed
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - dns_search_1 is failed
+ - "'Minimum version required' in dns_search_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## endpoint_mode ###################################################
+####################################################################
+
+- name: endpoint_mode
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ endpoint_mode: "dnsrr"
+ register: endpoint_mode_1
+ ignore_errors: true
+
+- name: endpoint_mode (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ endpoint_mode: "dnsrr"
+ register: endpoint_mode_2
+ ignore_errors: true
+
+- name: endpoint_mode (changes)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ endpoint_mode: "vip"
+ register: endpoint_mode_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - endpoint_mode_1 is changed
+ - endpoint_mode_2 is not changed
+ - endpoint_mode_3 is changed
+ when: docker_py_version is version('3.0.0', '>=')
+- assert:
+ that:
+ - endpoint_mode_1 is failed
+ - "'Minimum version required' in endpoint_mode_1.msg"
+ when: docker_py_version is version('3.0.0', '<')
+
+####################################################################
+## env #############################################################
+####################################################################
+
+- name: env
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ env:
+ - "TEST1=val1"
+ - "TEST2=val2"
+ register: env_1
+
+- name: env (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ env:
+ TEST1: val1
+ TEST2: val2
+ register: env_2
+
+- name: env (changes)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ env:
+ - "TEST1=val1"
+ - "TEST2=val3"
+ register: env_3
+
+- name: env (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ env:
+ - "TEST2=val3"
+ - "TEST1=val1"
+ register: env_4
+
+- name: env (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ env: []
+ register: env_5
+
+- name: env (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ env: []
+ register: env_6
+
+- name: env (fail unwrapped values)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ env:
+ TEST1: true
+ register: env_7
+ ignore_errors: true
+
+- name: env (fail invalid formatted string)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ env:
+ - "TEST1=val3"
+ - "TEST2"
+ register: env_8
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - env_1 is changed
+ - env_2 is not changed
+ - env_3 is changed
+ - env_4 is not changed
+ - env_5 is changed
+ - env_6 is not changed
+ - env_7 is failed
+ - env_8 is failed
+
+####################################################################
+## env_files #######################################################
+####################################################################
+
+- name: Copy env-files
+ copy:
+ src: "{{ item }}"
+ dest: "{{ remote_tmp_dir }}/{{ item }}"
+ loop:
+ - env-file-1
+ - env-file-2
+
+- name: env_files
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ env_files:
+ - "{{ remote_tmp_dir }}/env-file-1"
+ register: env_file_1
+
+- name: env_files (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ env_files:
+ - "{{ remote_tmp_dir }}/env-file-1"
+ register: env_file_2
+
+- name: env_files (more items)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ env_files:
+ - "{{ remote_tmp_dir }}/env-file-1"
+ - "{{ remote_tmp_dir }}/env-file-2"
+ register: env_file_3
+
+- name: env_files (order)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ env_files:
+ - "{{ remote_tmp_dir }}/env-file-2"
+ - "{{ remote_tmp_dir }}/env-file-1"
+ register: env_file_4
+
+- name: env_files (multiple idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ env_files:
+ - "{{ remote_tmp_dir }}/env-file-2"
+ - "{{ remote_tmp_dir }}/env-file-1"
+ register: env_file_5
+
+- name: env_files (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ env_files: []
+ register: env_file_6
+
+- name: env_files (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ env_files: []
+ register: env_file_7
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - env_file_1 is changed
+ - env_file_2 is not changed
+ - env_file_3 is changed
+ - env_file_4 is changed
+ - env_file_5 is not changed
+ - env_file_6 is changed
+ - env_file_7 is not changed
+
+###################################################################
+## force_update ###################################################
+###################################################################
+
+- name: force_update
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ args:
+ - sleep
+ - "3600"
+ force_update: true
+ register: force_update_1
+ ignore_errors: true
+
+- name: force_update (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ args:
+ - sleep
+ - "3600"
+ force_update: true
+ register: force_update_2
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - force_update_1 is changed
+ - force_update_2 is changed
+ when: docker_py_version is version('2.1.0', '>=')
+- assert:
+ that:
+ - force_update_1 is failed
+ - "'Minimum version required' in force_update_1.msg"
+ when: docker_py_version is version('2.1.0', '<')
+
+####################################################################
+## groups ##########################################################
+####################################################################
+
+- name: groups
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups:
+ - "1234"
+ - "5678"
+ register: groups_1
+ ignore_errors: true
+
+- name: groups (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups:
+ - "1234"
+ - "5678"
+ register: groups_2
+ ignore_errors: true
+
+- name: groups (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups:
+ - "5678"
+ - "1234"
+ register: groups_3
+ ignore_errors: true
+
+- name: groups (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups:
+ - "1234"
+ register: groups_4
+ ignore_errors: true
+
+- name: groups (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups: []
+ register: groups_5
+ ignore_errors: true
+
+- name: groups (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ groups: []
+ register: groups_6
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - groups_1 is changed
+ - groups_2 is not changed
+ - groups_3 is not changed
+ - groups_4 is changed
+ - groups_5 is changed
+ - groups_6 is not changed
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - groups_1 is failed
+ - "'Minimum version required' in groups_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## healthcheck #####################################################
+####################################################################
+
+- name: healthcheck
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - "1"
+ timeout: 2s
+ interval: 0h0m2s3ms4us
+ retries: 2
+ start_period: 20s
+ register: healthcheck_1
+ ignore_errors: true
+
+- name: healthcheck (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - 1
+ timeout: 2s
+ interval: 0h0m2s3ms4us
+ retries: 2
+ start_period: 20s
+ register: healthcheck_2
+ ignore_errors: true
+
+- name: healthcheck (changed)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test:
+ - CMD
+ - sleep
+ - "1"
+ timeout: 3s
+ interval: 0h1m2s3ms4us
+ retries: 3
+ register: healthcheck_3
+ ignore_errors: true
+
+- name: healthcheck (disabled)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test:
+ - NONE
+ register: healthcheck_4
+ ignore_errors: true
+
+- name: healthcheck (disabled, idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test:
+ - NONE
+ register: healthcheck_5
+ ignore_errors: true
+
+- name: healthcheck (string in healthcheck test, changed)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test: "sleep 1"
+ register: healthcheck_6
+ ignore_errors: true
+
+- name: healthcheck (string in healthcheck test, idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck:
+ test: "sleep 1"
+ register: healthcheck_7
+ ignore_errors: true
+
+- name: healthcheck (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck: {}
+ register: healthcheck_8
+ ignore_errors: true
+
+- name: healthcheck (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ healthcheck: {}
+ register: healthcheck_9
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - healthcheck_1 is changed
+ - healthcheck_2 is not changed
+ - healthcheck_3 is changed
+ - healthcheck_4 is changed
+ - healthcheck_5 is not changed
+ - healthcheck_6 is changed
+ - healthcheck_7 is not changed
+ - healthcheck_8 is changed
+ - healthcheck_9 is not changed
+ when: docker_api_version is version('1.29', '>=') and docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - healthcheck_1 is failed
+ - "'Minimum version required' in healthcheck_1.msg"
+ when: docker_api_version is version('1.29', '<') or docker_py_version is version('2.6.0', '<')
+
+###################################################################
+## hostname #######################################################
+###################################################################
+
+- name: hostname
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ hostname: me.example.com
+ register: hostname_1
+ ignore_errors: true
+
+- name: hostname (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ hostname: me.example.com
+ register: hostname_2
+ ignore_errors: true
+
+- name: hostname (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ hostname: me.example.org
+ register: hostname_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - hostname_1 is changed
+ - hostname_2 is not changed
+ - hostname_3 is changed
+ when: docker_py_version is version('2.2.0', '>=')
+- assert:
+ that:
+ - hostname_1 is failed
+ - "'Minimum version required' in hostname_1.msg"
+ when: docker_py_version is version('2.2.0', '<')
+
+###################################################################
+## hosts ##########################################################
+###################################################################
+
+- name: hosts
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ hosts:
+ example.com: 1.2.3.4
+ example.org: 4.3.2.1
+ register: hosts_1
+ ignore_errors: true
+
+- name: hosts (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ hosts:
+ example.com: 1.2.3.4
+ example.org: 4.3.2.1
+ register: hosts_2
+ ignore_errors: true
+
+- name: hosts (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ hosts:
+ example.com: 1.2.3.4
+ register: hosts_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - hosts_1 is changed
+ - hosts_2 is not changed
+ - hosts_3 is changed
+ when: docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - hosts_1 is failed
+ - "'Minimum version required' in hosts_1.msg"
+ when: docker_py_version is version('2.6.0', '<')
+
+
+###################################################################
+## image ##########################################################
+###################################################################
+
+- name: image
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ register: image_1
+
+- name: image (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ register: image_2
+
+- name: image (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine_different }}"
+ register: image_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - image_1 is changed
+ - image_2 is not changed
+ - image_3 is changed
+
+####################################################################
+## labels ##########################################################
+####################################################################
+
+- name: labels
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ labels:
+ test_1: "1"
+ test_2: "2"
+ register: labels_1
+
+- name: labels (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ labels:
+ test_1: "1"
+ test_2: "2"
+ register: labels_2
+
+- name: labels (changes)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ labels:
+ test_1: "1"
+ test_2: "2"
+ test_3: "3"
+ register: labels_3
+
+- name: labels (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ labels: {}
+ register: labels_4
+
+- name: labels (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ labels: {}
+ register: labels_5
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - labels_1 is changed
+ - labels_2 is not changed
+ - labels_3 is changed
+ - labels_4 is changed
+ - labels_5 is not changed
+
+###################################################################
+## mode ###########################################################
+###################################################################
+
+- name: mode
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mode: "replicated"
+ replicas: 1
+ register: mode_1
+
+- name: mode (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mode: "replicated"
+ replicas: 1
+ register: mode_2
+
+- name: mode (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ mode: "global"
+ replicas: 1
+ register: mode_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - mode_1 is changed
+ - mode_2 is not changed
+ - mode_3 is changed
+
+####################################################################
+## stop_grace_period ###############################################
+####################################################################
+
+- name: stop_grace_period
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_grace_period: 60s
+ register: stop_grace_period_1
+
+- name: stop_grace_period (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_grace_period: 60s
+ register: stop_grace_period_2
+
+- name: stop_grace_period (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_grace_period: 1m30s
+ register: stop_grace_period_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - stop_grace_period_1 is changed
+ - stop_grace_period_2 is not changed
+ - stop_grace_period_3 is changed
+
+####################################################################
+## stop_signal #####################################################
+####################################################################
+
+- name: stop_signal
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_signal: "30"
+ register: stop_signal_1
+ ignore_errors: true
+
+- name: stop_signal (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_signal: "30"
+ register: stop_signal_2
+ ignore_errors: true
+
+- name: stop_signal (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ stop_signal: "9"
+ register: stop_signal_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - stop_signal_1 is changed
+ - stop_signal_2 is not changed
+ - stop_signal_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - stop_signal_1 is failed
+ - "'Minimum version required' in stop_signal_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('2.6.0', '<')
+
+####################################################################
+## publish #########################################################
+####################################################################
+
+- name: publish
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: tcp
+ published_port: 60001
+ target_port: 60001
+ - protocol: udp
+ published_port: 60002
+ target_port: 60002
+ register: publish_1
+ ignore_errors: true
+
+- name: publish (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: udp
+ published_port: 60002
+ target_port: 60002
+ - published_port: 60001
+ target_port: 60001
+ register: publish_2
+ ignore_errors: true
+
+- name: publish (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: tcp
+ published_port: 60002
+ target_port: 60003
+ - protocol: udp
+ published_port: 60001
+ target_port: 60001
+ register: publish_3
+ ignore_errors: true
+
+- name: publish (mode)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: tcp
+ published_port: 60002
+ target_port: 60003
+ mode: host
+ - protocol: udp
+ published_port: 60001
+ target_port: 60001
+ mode: host
+ register: publish_4
+ ignore_errors: true
+
+- name: publish (mode idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: udp
+ published_port: 60001
+ target_port: 60001
+ mode: host
+ - protocol: tcp
+ published_port: 60002
+ target_port: 60003
+ mode: host
+ register: publish_5
+ ignore_errors: true
+
+- name: publish (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish: []
+ register: publish_6
+ ignore_errors: true
+
+- name: publish (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish: []
+ register: publish_7
+ ignore_errors: true
+
+- name: publish (publishes the same port with both protocols)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: udp
+ published_port: 60001
+ target_port: 60001
+ mode: host
+ - protocol: tcp
+ published_port: 60001
+ target_port: 60001
+ mode: host
+ register: publish_8
+ ignore_errors: true
+- name: gather service info
+ docker_swarm_service_info:
+ name: "{{ service_name }}"
+ register: publish_8_info
+
+- name: publish (without published_port)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: udp
+ target_port: 60001
+ mode: host
+ register: publish_9
+ ignore_errors: true
+- name: gather service info
+ docker_swarm_service_info:
+ name: "{{ service_name }}"
+ register: publish_9_info
+
+- name: publish (without published_port, idempotence)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ publish:
+ - protocol: udp
+ target_port: 60001
+ mode: host
+ register: publish_10
+ ignore_errors: true
+- name: gather service info
+ docker_swarm_service_info:
+ name: "{{ service_name }}"
+ register: publish_10_info
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - publish_1 is changed
+ - publish_2 is not changed
+ - publish_3 is changed
+ - publish_4 is changed
+ - publish_5 is not changed
+ - publish_6 is changed
+ - publish_7 is not changed
+ - publish_8 is changed
+ - (publish_8_info.service.Endpoint.Ports | length) == 2
+ - publish_9 is changed
+ - publish_10 is not changed
+ when: docker_py_version is version('3.0.0', '>=')
+- assert:
+ that:
+ - publish_1 is failed
+ - "'Minimum version required' in publish_1.msg"
+ when: docker_py_version is version('3.0.0', '<')
+
+###################################################################
+## read_only ######################################################
+###################################################################
+
+- name: read_only
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ read_only: true
+ register: read_only_1
+ ignore_errors: true
+
+- name: read_only (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ read_only: true
+ register: read_only_2
+ ignore_errors: true
+
+- name: read_only (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ read_only: false
+ register: read_only_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - read_only_1 is changed
+ - read_only_2 is not changed
+ - read_only_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('2.6.0', '>=')
+- assert:
+ that:
+ - read_only_1 is failed
+ - "'Minimum version required' in read_only_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('2.6.0', '<')
+
+###################################################################
+## replicas #######################################################
+###################################################################
+
+- name: replicas
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ replicas: 2
+ register: replicas_1
+
+- name: replicas (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ replicas: 2
+ register: replicas_2
+
+- name: replicas (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ replicas: 3
+ register: replicas_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - replicas_1 is changed
+ - replicas_2 is not changed
+ - replicas_3 is changed
+
+###################################################################
+# resolve_image ###################################################
+###################################################################
+
+- name: resolve_image (false)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -v -c "sleep 10m"'
+ resolve_image: false
+ register: resolve_image_1
+
+- name: resolve_image (false idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -v -c "sleep 10m"'
+ resolve_image: false
+ register: resolve_image_2
+
+- name: resolve_image (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ command: '/bin/sh -v -c "sleep 10m"'
+ resolve_image: true
+ register: resolve_image_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - resolve_image_1 is changed
+ - resolve_image_2 is not changed
+ - resolve_image_3 is changed
+ when: docker_api_version is version('1.30', '>=') and docker_py_version is version('3.2.0', '>=')
+- assert:
+ that:
+ - resolve_image_1 is changed
+ - resolve_image_2 is not changed
+ - resolve_image_3 is failed
+ - "('version is ' ~ docker_py_version ~ ' ') in resolve_image_3.msg"
+ - "'Minimum version required is 3.2.0 ' in resolve_image_3.msg"
+ when: docker_api_version is version('1.30', '<') or docker_py_version is version('3.2.0', '<')
+
+###################################################################
+# tty #############################################################
+###################################################################
+
+- name: tty
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ tty: true
+ register: tty_1
+ ignore_errors: true
+
+- name: tty (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ tty: true
+ register: tty_2
+ ignore_errors: true
+
+- name: tty (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ tty: false
+ register: tty_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - tty_1 is changed
+ - tty_2 is not changed
+ - tty_3 is changed
+ when: docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - tty_1 is failed
+ - "'Minimum version required' in tty_1.msg"
+ when: docker_py_version is version('2.4.0', '<')
+
+###################################################################
+## user ###########################################################
+###################################################################
+
+- name: user
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ user: "operator"
+ register: user_1
+
+- name: user (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ user: "operator"
+ register: user_2
+
+- name: user (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ user: "root"
+ register: user_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - user_1 is changed
+ - user_2 is not changed
+ - user_3 is changed
+
+####################################################################
+## working_dir #####################################################
+####################################################################
+
+- name: working_dir
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ working_dir: /tmp
+ register: working_dir_1
+
+- name: working_dir (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ working_dir: /tmp
+ register: working_dir_2
+
+- name: working_dir (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ working_dir: /
+ register: working_dir_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - working_dir_1 is changed
+ - working_dir_2 is not changed
+ - working_dir_3 is changed
+
+####################################################################
+## init ############################################################
+####################################################################
+
+- name: init
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ init: true
+ register: init_1
+
+- name: init (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ init: true
+ register: init_2
+
+- name: init (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ init: false
+ register: init_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - init_1 is changed
+ - init_2 is not changed
+ - init_3 is changed
+ when: docker_api_version is version('1.37', '>=')
+
+- assert:
+ that:
+ - init_1 is failed
+ - "('version is ' ~ docker_api_version ~'. Minimum version required is 1.37') in init_1.msg"
+ when: docker_api_version is version('1.37', '<')
+
+####################################################################
+## cap_drop, capabilities ##########################################
+####################################################################
+
+- name: capabilities, cap_drop
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ init: true
+ cap_add:
+ - sys_time
+ cap_drop:
+ - all
+ register: capabilities_1
+ ignore_errors: true
+
+- name: capabilities, cap_drop (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ init: true
+ cap_add:
+ - sys_time
+ cap_drop:
+ - all
+ register: capabilities_2
+ ignore_errors: true
+ diff: true
+
+- name: capabilities, cap_drop (less)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ init: true
+ cap_add: []
+ cap_drop:
+ - all
+ register: capabilities_3
+ ignore_errors: true
+ diff: true
+
+- name: capabilities, cap_drop (changed)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ init: true
+ cap_add:
+ - setgid
+ cap_drop:
+ - all
+ register: capabilities_4
+ ignore_errors: true
+ diff: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - capabilities_1 is changed
+ - capabilities_2 is not changed
+ - capabilities_3 is changed
+ - capabilities_4 is changed
+ when: docker_api_version is version('1.41', '>=') and docker_py_version is version('5.0.3', '>=')
+
+- assert:
+ that:
+ - capabilities_1 is failed
+ - >
+ (('version is ' ~ docker_py_version ~ ' ') in capabilities_1.msg and 'Minimum version required is 5.0.3 ' in capabilities_1.msg)
+ or (('Docker API version is ' ~ docker_api_version ~ '. ') in capabilities_1.msg and 'Minimum version required is 1.41 ' in capabilities_1.msg)
+ when: docker_api_version is version('1.41', '<') or docker_py_version is version('5.0.3', '<')
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml
new file mode 100644
index 00000000..30ed155b
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/placement.yml
@@ -0,0 +1,261 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-placement' }}"
+
+- name: Registering service name
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+
+####################################################################
+## placement.preferences ###########################################
+####################################################################
+
+- name: placement.preferences
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ preferences:
+ - spread: "node.labels.test"
+ register: placement_preferences_1
+ ignore_errors: true
+
+- name: placement.preferences (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ preferences:
+ - spread: "node.labels.test"
+ register: placement_preferences_2
+ ignore_errors: true
+
+- name: placement.preferences (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ preferences:
+ - spread: "node.labels.test2"
+ register: placement_preferences_3
+ ignore_errors: true
+
+- name: placement.preferences (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ preferences: []
+ register: placement_preferences_4
+ ignore_errors: true
+
+- name: placement.preferences (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ preferences: []
+ register: placement_preferences_5
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - placement_preferences_1 is changed
+ - placement_preferences_2 is not changed
+ - placement_preferences_3 is changed
+ - placement_preferences_4 is changed
+ - placement_preferences_5 is not changed
+ when: docker_api_version is version('1.27', '>=') and docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - placement_preferences_1 is failed
+ - "'Minimum version required' in placement_preferences_1.msg"
+ when: docker_api_version is version('1.27', '<') or docker_py_version is version('2.4.0', '<')
+
+####################################################################
+## placement.constraints ##########################################
+####################################################################
+
+- name: placement.constraints
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints:
+ - "node.role == manager"
+ register: constraints_1
+ ignore_errors: true
+
+- name: placement.constraints (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints:
+ - "node.role == manager"
+ register: constraints_2
+ ignore_errors: true
+
+- name: placement.constraints (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints:
+ - "node.role == worker"
+ register: constraints_3
+ ignore_errors: true
+
+- name: placement.constraints (add)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints:
+ - "node.role == worker"
+ - "node.label != non_existent_label"
+ register: constraints_4
+ ignore_errors: true
+
+- name: placement.constraints (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints:
+ - "node.label != non_existent_label"
+ - "node.role == worker"
+ register: constraints_5
+ ignore_errors: true
+
+- name: placement.constraints (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints: []
+ register: constraints_6
+ ignore_errors: true
+
+- name: placement.constraints (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ constraints: []
+ register: constraints_7
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - constraints_1 is changed
+ - constraints_2 is not changed
+ - constraints_3 is changed
+ - constraints_4 is changed
+ - constraints_5 is not changed
+ - constraints_6 is changed
+ - constraints_7 is not changed
+ when: docker_api_version is version('1.27', '>=') and docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - constraints_1 is failed
+ - "'Minimum version required' in constraints_1.msg"
+ when: docker_api_version is version('1.27', '<') or docker_py_version is version('2.4.0', '<')
+
+####################################################################
+## placement.replicas_max_per_node ################################
+####################################################################
+
+- name: placement.replicas_max_per_node
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ replicas_max_per_node: 1
+ register: replicas_max_per_node_1
+ ignore_errors: true
+
+- name: placement.replicas_max_per_node (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ replicas_max_per_node: 1
+ register: replicas_max_per_node_2
+ ignore_errors: true
+
+- name: placement.replicas_max_per_node (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ placement:
+ replicas_max_per_node: 2
+ register: replicas_max_per_node_3
+ ignore_errors: true
+
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - replicas_max_per_node_1 is changed
+ - replicas_max_per_node_2 is not changed
+ - replicas_max_per_node_3 is changed
+ when: docker_api_version is version('1.40', '>=') and docker_py_version is version('4.4.3', '>=')
+- assert:
+ that:
+ - replicas_max_per_node_1 is failed
+ - "'Minimum version required' in replicas_max_per_node_1.msg"
+ when: docker_api_version is version('1.40', '<') or docker_py_version is version('4.4.3', '<')
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml
new file mode 100644
index 00000000..ac891750
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/resources.yml
@@ -0,0 +1,196 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-resources' }}"
+
+- name: Registering service name
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+####################################################################
+## limits.cpus #####################################################
+####################################################################
+
+- name: limits.cpus
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ limits:
+ cpus: 1
+ register: limit_cpu_1
+
+- name: limits.cpus (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ limits:
+ cpus: 1
+ register: limit_cpu_2
+
+- name: limits.cpus (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ limits:
+ cpus: 0.5
+ register: limit_cpu_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - limit_cpu_1 is changed
+ - limit_cpu_2 is not changed
+ - limit_cpu_3 is changed
+
+###################################################################
+## limits.memory ##################################################
+###################################################################
+
+- name: limits.memory
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ limits:
+ memory: 64M
+ register: limit_memory_1
+
+- name: limits.memory (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ limits:
+ memory: 64M
+ register: limit_memory_2
+
+- name: limits.memory (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ limits:
+ memory: 32M
+ register: limit_memory_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - limit_memory_1 is changed
+ - limit_memory_2 is not changed
+ - limit_memory_3 is changed
+
+###################################################################
+## reservations.cpus ##############################################
+###################################################################
+
+- name: reserve_cpu
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ reservations:
+ cpus: 1
+ register: reserve_cpu_1
+
+- name: reserve_cpu (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ reservations:
+ cpus: 1
+ register: reserve_cpu_2
+
+- name: reserve_cpu (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ reservations:
+ cpus: 0.5
+ register: reserve_cpu_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - reserve_cpu_1 is changed
+ - reserve_cpu_2 is not changed
+ - reserve_cpu_3 is changed
+
+###################################################################
+## reservations.memory ############################################
+###################################################################
+
+- name: reservations.memory
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ reservations:
+ memory: 64M
+ register: reserve_memory_1
+
+- name: reservations.memory (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ reservations:
+ memory: 64M
+ register: reserve_memory_2
+
+- name: reservations.memory (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ reservations:
+ memory: 32M
+ register: reserve_memory_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - reserve_memory_1 is changed
+ - reserve_memory_2 is not changed
+ - reserve_memory_3 is changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml
new file mode 100644
index 00000000..f80a4475
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/restart_config.yml
@@ -0,0 +1,196 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-restart_config' }}"
+
+- name: Registering service name
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+###################################################################
+## restart_config.condition #######################################
+###################################################################
+
+- name: restart_config.condition
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ condition: "on-failure"
+ register: restart_policy_1
+
+- name: restart_config.condition (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ condition: "on-failure"
+ register: restart_policy_2
+
+- name: restart_config.condition (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ condition: "any"
+ register: restart_policy_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - restart_policy_1 is changed
+ - restart_policy_2 is not changed
+ - restart_policy_3 is changed
+
+###################################################################
+## restart_config.max_attempts ####################################
+###################################################################
+
+- name: restart_config.max_attempts
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ max_attempts: 1
+ register: restart_policy_attempts_1
+
+- name: restart_config.max_attempts (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ max_attempts: 1
+ register: restart_policy_attempts_2
+
+- name: restart_config.max_attempts (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ max_attempts: 2
+ register: restart_policy_attempts_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - restart_policy_attempts_1 is changed
+ - restart_policy_attempts_2 is not changed
+ - restart_policy_attempts_3 is changed
+
+###################################################################
+## restart_config.delay ###########################################
+###################################################################
+
+- name: restart_config.delay
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ delay: 5s
+ register: restart_policy_delay_1
+
+- name: restart_config.delay (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ delay: 5s
+ register: restart_policy_delay_2
+
+- name: restart_config.delay (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ delay: 10s
+ register: restart_policy_delay_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - restart_policy_delay_1 is changed
+ - restart_policy_delay_2 is not changed
+ - restart_policy_delay_3 is changed
+
+###################################################################
+## restart_config.window ##########################################
+###################################################################
+
+- name: restart_config.window
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ window: 10s
+ register: restart_policy_window_1
+
+- name: restart_config.window (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ window: 10s
+ register: restart_policy_window_2
+
+- name: restart_config.window (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ restart_config:
+ window: 20s
+ register: restart_policy_window_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - restart_policy_window_1 is changed
+ - restart_policy_window_2 is not changed
+ - restart_policy_window_3 is changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml
new file mode 100644
index 00000000..9035ffdb
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/rollback_config.yml
@@ -0,0 +1,342 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-rollback_config' }}"
+
+- name: Registering service name
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+###################################################################
+## rollback_config.delay ##########################################
+###################################################################
+
+- name: rollback_config.delay
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ delay: 5s
+ register: rollback_config_delay_1
+ ignore_errors: true
+
+- name: rollback_config.delay (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ delay: 5s
+ register: rollback_config_delay_2
+ ignore_errors: true
+
+- name: rollback_config.delay (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ delay: 12s
+ register: rollback_config_delay_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - rollback_config_delay_1 is changed
+ - rollback_config_delay_2 is not changed
+ - rollback_config_delay_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_delay_1 is failed
+ - "'Minimum version required' in rollback_config_delay_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+## rollback_config.failure_action #################################
+###################################################################
+
+- name: rollback_config.failure_action
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ failure_action: "pause"
+ register: rollback_config_failure_action_1
+ ignore_errors: true
+
+- name: rollback_config.failure_action (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ failure_action: "pause"
+ register: rollback_config_failure_action_2
+ ignore_errors: true
+
+- name: rollback_config.failure_action (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ failure_action: "continue"
+ register: rollback_config_failure_action_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - rollback_config_failure_action_1 is changed
+ - rollback_config_failure_action_2 is not changed
+ - rollback_config_failure_action_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_failure_action_1 is failed
+ - "'Minimum version required' in rollback_config_failure_action_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+## rollback_config.max_failure_ratio ##############################
+###################################################################
+
+- name: rollback_config.max_failure_ratio
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ max_failure_ratio: 0.25
+ register: rollback_config_max_failure_ratio_1
+ ignore_errors: true
+
+- name: rollback_config.max_failure_ratio (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ max_failure_ratio: 0.25
+ register: rollback_config_max_failure_ratio_2
+ ignore_errors: true
+
+- name: rollback_config.max_failure_ratio (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ max_failure_ratio: 0.50
+ register: rollback_config_max_failure_ratio_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - rollback_config_max_failure_ratio_1 is changed
+ - rollback_config_max_failure_ratio_2 is not changed
+ - rollback_config_max_failure_ratio_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_max_failure_ratio_1 is failed
+ - "'Minimum version required' in rollback_config_max_failure_ratio_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+## rollback_config.monitor ########################################
+###################################################################
+
+- name: rollback_config.monitor
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ monitor: 10s
+ register: rollback_config_monitor_1
+ ignore_errors: true
+
+- name: rollback_config.monitor (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ monitor: 10s
+ register: rollback_config_monitor_2
+ ignore_errors: true
+
+- name: rollback_config.monitor (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ monitor: 60s
+ register: rollback_config_monitor_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - rollback_config_monitor_1 is changed
+ - rollback_config_monitor_2 is not changed
+ - rollback_config_monitor_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_monitor_1 is failed
+ - "'Minimum version required' in rollback_config_monitor_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+## rollback_config.order ##########################################
+###################################################################
+
+- name: rollback_config.order
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ order: "start-first"
+ register: rollback_config_order_1
+ ignore_errors: true
+
+- name: rollback_config.order (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ order: "start-first"
+ register: rollback_config_order_2
+ ignore_errors: true
+
+- name: rollback_config.order (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ order: "stop-first"
+ register: rollback_config_order_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - rollback_config_order_1 is changed
+ - rollback_config_order_2 is not changed
+ - rollback_config_order_3 is changed
+ when: docker_api_version is version('1.29', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_order_1 is failed
+ - "'Minimum version required' in rollback_config_order_1.msg"
+ when: docker_api_version is version('1.29', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+## rollback_config.parallelism ####################################
+###################################################################
+
+- name: rollback_config.parallelism
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ parallelism: 2
+ register: rollback_config_parallelism_1
+ ignore_errors: true
+
+- name: rollback_config.parallelism (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ parallelism: 2
+ register: rollback_config_parallelism_2
+ ignore_errors: true
+
+- name: rollback_config.parallelism (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ rollback_config:
+ parallelism: 1
+ register: rollback_config_parallelism_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - rollback_config_parallelism_1 is changed
+ - rollback_config_parallelism_2 is not changed
+ - rollback_config_parallelism_3 is changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+- assert:
+ that:
+ - rollback_config_parallelism_1 is failed
+ - "'Minimum version required' in rollback_config_parallelism_1.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml
new file mode 100644
index 00000000..2af5076f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/secrets.yml
@@ -0,0 +1,461 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering container name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-secrets' }}"
+ secret_name_1: "{{ name_prefix ~ '-secret-1' }}"
+ secret_name_2: "{{ name_prefix ~ '-secret-2' }}"
+ secret_name_3: "{{ name_prefix ~ '-secret-3' }}"
+
+- name: Registering container name
+ set_fact:
+ secret_names: "{{ secret_names + [secret_name_1, secret_name_2] }}"
+
+- docker_secret:
+ name: "{{ secret_name_1 }}"
+ data: "secret1"
+ state: "present"
+ register: "secret_result_1"
+ when: docker_py_version is version('2.1.0', '>=')
+
+- docker_secret:
+ name: "{{ secret_name_2 }}"
+ data: "secret2"
+ state: "present"
+ register: "secret_result_2"
+ when: docker_py_version is version('2.1.0', '>=')
+
+- docker_secret:
+ name: "{{ secret_name_3 }}"
+ data: "secret3"
+ state: "present"
+ rolling_versions: true
+ register: "secret_result_3"
+ when: docker_py_version is version('2.1.0', '>=')
+
+####################################################################
+## secrets #########################################################
+####################################################################
+
+- name: secrets
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ register: secrets_1
+ ignore_errors: true
+
+- name: secrets (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ register: secrets_2
+ ignore_errors: true
+
+- name: secrets (add)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ - secret_name: "{{ secret_name_2 }}"
+ filename: "/run/secrets/{{ secret_name_2 }}.txt"
+ register: secrets_3
+ ignore_errors: true
+
+- name: secrets (add idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ - secret_id: "{{ secret_result_2.secret_id|default('') }}"
+ secret_name: "{{ secret_name_2 }}"
+ filename: "/run/secrets/{{ secret_name_2 }}.txt"
+ register: secrets_4
+ ignore_errors: true
+
+- name: secrets (add idempotency no id)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ - secret_name: "{{ secret_name_2 }}"
+ filename: "/run/secrets/{{ secret_name_2 }}.txt"
+ register: secrets_5
+ ignore_errors: true
+
+- name: secrets (order idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_name: "{{ secret_name_2 }}"
+ filename: "/run/secrets/{{ secret_name_2 }}.txt"
+ - secret_name: "{{ secret_name_1 }}"
+ filename: "/run/secrets/{{ secret_name_1 }}.txt"
+ register: secrets_6
+ ignore_errors: true
+
+- name: secrets (empty)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets: []
+ register: secrets_7
+ ignore_errors: true
+
+- name: secrets (empty idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets: []
+ register: secrets_8
+ ignore_errors: true
+
+- name: rolling secrets
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_name: "{{ secret_name_3 }}_v1"
+ filename: "/run/secrets/{{ secret_name_3 }}.txt"
+ register: secrets_9
+ ignore_errors: true
+
+- name: update rolling secret
+ docker_secret:
+ name: "{{ secret_name_3 }}"
+ data: "newsecret3"
+ state: "present"
+ rolling_versions: true
+ register: secrets_10
+ when: docker_py_version is version('2.1.0', '>=')
+ ignore_errors: true
+
+- name: rolling secrets service update
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_name: "{{ secret_name_3 }}_v2"
+ filename: "/run/secrets/{{ secret_name_3 }}.txt"
+ register: secrets_11
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - secrets_1 is changed
+ - secrets_2 is not changed
+ - secrets_3 is changed
+ - secrets_4 is not changed
+ - secrets_5 is not changed
+ - secrets_6 is not changed
+ - secrets_7 is changed
+ - secrets_8 is not changed
+ - secrets_9 is changed
+ - secrets_10 is not failed
+ - secrets_11 is changed
+ when: docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - secrets_1 is failed
+ - "'Minimum version required' in secrets_1.msg"
+ when: docker_py_version is version('2.4.0', '<')
+
+####################################################################
+## secrets (uid) ###################################################
+####################################################################
+
+- name: secrets (uid int)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ uid: 1000
+ register: secrets_1
+ ignore_errors: true
+
+- name: secrets (uid int idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ uid: 1000
+ register: secrets_2
+ ignore_errors: true
+
+- name: secrets (uid int change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ uid: 1002
+ register: secrets_3
+ ignore_errors: true
+
+- name: secrets (uid str)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ uid: "1001"
+ register: secrets_4
+ ignore_errors: true
+
+- name: secrets (uid str idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ uid: "1001"
+ register: secrets_5
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - secrets_1 is changed
+ - secrets_2 is not changed
+ - secrets_3 is changed
+ - secrets_4 is changed
+ - secrets_5 is not changed
+ when: docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - secrets_1 is failed
+ - "'Minimum version required' in secrets_1.msg"
+ when: docker_py_version is version('2.4.0', '<')
+
+####################################################################
+## secrets (gid) ###################################################
+####################################################################
+
+- name: secrets (gid int)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ gid: 1001
+ register: secrets_1
+ ignore_errors: true
+
+- name: secrets (gid int idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ gid: 1001
+ register: secrets_2
+ ignore_errors: true
+
+- name: secrets (gid int change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ gid: 1002
+ register: secrets_3
+ ignore_errors: true
+
+- name: secrets (gid str)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ gid: "1003"
+ register: secrets_4
+ ignore_errors: true
+
+- name: secrets (gid str idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ gid: "1003"
+ register: secrets_5
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - secrets_1 is changed
+ - secrets_2 is not changed
+ - secrets_3 is changed
+ - secrets_4 is changed
+ - secrets_5 is not changed
+ when: docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - secrets_1 is failed
+ - "'Minimum version required' in secrets_1.msg"
+ when: docker_py_version is version('2.4.0', '<')
+
+####################################################################
+## secrets (mode) ##################################################
+####################################################################
+
+- name: secrets (mode)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ mode: 0600
+ register: secrets_1
+ ignore_errors: true
+
+- name: secrets (mode idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ mode: 0600
+ register: secrets_2
+ ignore_errors: true
+
+- name: secrets (mode change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ secrets:
+ - secret_id: "{{ secret_result_1.secret_id|default('') }}"
+ secret_name: "{{ secret_name_1 }}"
+ mode: 0777
+ register: secrets_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - secrets_1 is changed
+ - secrets_2 is not changed
+ - secrets_3 is changed
+ when: docker_py_version is version('2.4.0', '>=')
+- assert:
+ that:
+ - secrets_1 is failed
+ - "'Minimum version required' in secrets_1.msg"
+ when: docker_py_version is version('2.4.0', '<')
+
+####################################################################
+####################################################################
+####################################################################
+
+- name: Delete secrets
+ docker_secret:
+ name: "{{ secret_name }}"
+ state: absent
+ force: true
+ loop:
+ - "{{ secret_name_1 }}"
+ - "{{ secret_name_2 }}"
+ - "{{ secret_name_3 }}"
+ loop_control:
+ loop_var: secret_name
+ ignore_errors: true
+ when: docker_py_version is version('2.1.0', '>=')
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml
new file mode 100644
index 00000000..fb335a03
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/tasks/tests/update_config.yml
@@ -0,0 +1,350 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering service name
+ set_fact:
+ service_name: "{{ name_prefix ~ '-update_config' }}"
+
+- name: Registering service names
+ set_fact:
+ service_names: "{{ service_names + [service_name] }}"
+
+###################################################################
+## update_config.delay ############################################
+###################################################################
+
+- name: update_config.delay
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ delay: 5s
+ register: update_delay_1
+
+- name: update_config.delay (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ delay: 5s
+ register: update_delay_2
+
+- name: update_config.delay (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ delay: 12s
+ register: update_delay_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - update_delay_1 is changed
+ - update_delay_2 is not changed
+ - update_delay_3 is changed
+
+###################################################################
+## update_config.failure_action ###################################
+###################################################################
+
+- name: update_config.failure_action
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ failure_action: "pause"
+ register: update_failure_action_1
+
+- name: update_config.failure_action (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ failure_action: "pause"
+ register: update_failure_action_2
+
+- name: update_config.failure_action (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ failure_action: "continue"
+ register: update_failure_action_3
+
+- name: update_config.failure_action (rollback)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ failure_action: "rollback"
+ register: update_failure_action_4
+ ignore_errors: true
+
+- name: update_config.failure_action (rollback idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ failure_action: "rollback"
+ register: update_failure_action_5
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - update_failure_action_1 is changed
+ - update_failure_action_2 is not changed
+ - update_failure_action_3 is changed
+
+- assert:
+ that:
+ - update_failure_action_4 is changed
+ - update_failure_action_5 is not failed
+ - update_failure_action_5 is not changed
+ when: docker_api_version is version('1.28', '>=') and docker_py_version is version('3.5.0', '>=')
+
+- assert:
+ that:
+ - update_failure_action_4 is failed
+ - "'Minimum version required' in update_failure_action_4.msg"
+ when: docker_api_version is version('1.28', '<') or docker_py_version is version('3.5.0', '<')
+
+###################################################################
+## update_config.max_failure_ratio ################################
+###################################################################
+
+- name: update_config.max_failure_ratio
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ max_failure_ratio: 0.25
+ register: update_max_failure_ratio_1
+ ignore_errors: true
+
+- name: update_config.max_failure_ratio (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ max_failure_ratio: 0.25
+ register: update_max_failure_ratio_2
+ ignore_errors: true
+
+- name: update_config.max_failure_ratio (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ max_failure_ratio: 0.50
+ register: update_max_failure_ratio_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - update_max_failure_ratio_1 is changed
+ - update_max_failure_ratio_2 is not changed
+ - update_max_failure_ratio_3 is changed
+ when: docker_py_version is version('2.1.0', '>=')
+- assert:
+ that:
+ - update_max_failure_ratio_1 is failed
+ - "'Minimum version required' in update_max_failure_ratio_1.msg"
+ when: docker_py_version is version('2.1.0', '<')
+
+###################################################################
+# update_config.monitor ###########################################
+###################################################################
+
+- name: update_config.monitor
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ monitor: 10s
+ register: update_monitor_1
+ ignore_errors: true
+
+- name: update_config.monitor (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ monitor: 10s
+ register: update_monitor_2
+ ignore_errors: true
+
+- name: update_config.monitor (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ monitor: 60s
+ register: update_monitor_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - update_monitor_1 is changed
+ - update_monitor_2 is not changed
+ - update_monitor_3 is changed
+ when: docker_py_version is version('2.1.0', '>=')
+- assert:
+ that:
+ - update_monitor_1 is failed
+ - "'Minimum version required' in update_monitor_1.msg"
+ when: docker_py_version is version('2.1.0', '<')
+
+###################################################################
+# update_config.order #############################################
+###################################################################
+
+- name: update_config.order
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ order: "start-first"
+ register: update_order_1
+ ignore_errors: true
+
+- name: update_config.order (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ order: "start-first"
+ register: update_order_2
+ ignore_errors: true
+
+- name: update_config.order (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ order: "stop-first"
+ register: update_order_3
+ ignore_errors: true
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - update_order_1 is changed
+ - update_order_2 is not changed
+ - update_order_3 is changed
+ when: docker_api_version is version('1.29', '>=') and docker_py_version is version('2.7.0', '>=')
+- assert:
+ that:
+ - update_order_1 is failed
+ - "'Minimum version required' in update_order_1.msg"
+ when: docker_api_version is version('1.29', '<') or docker_py_version is version('2.7.0', '<')
+
+###################################################################
+## update_config.parallelism ######################################
+###################################################################
+
+- name: update_config.parallelism
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ parallelism: 2
+ register: update_parallelism_1
+
+- name: update_config.parallelism (idempotency)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ parallelism: 2
+ register: update_parallelism_2
+
+- name: update_config.parallelism (change)
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+ resolve_image: false
+ command: '/bin/sh -v -c "sleep 10m"'
+ update_config:
+ parallelism: 1
+ register: update_parallelism_3
+
+- name: cleanup
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ diff: false
+
+- assert:
+ that:
+ - update_parallelism_1 is changed
+ - update_parallelism_2 is not changed
+ - update_parallelism_3 is changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/vars/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/vars/main.yml
new file mode 100644
index 00000000..836ee41c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service/vars/main.yml
@@ -0,0 +1,60 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+service_expected_output:
+ args: [sleep, '1800']
+ cap_add: null
+ cap_drop: null
+ configs: null
+ constraints: null
+ container_labels: null
+ command: null
+ dns: null
+ dns_options: null
+ dns_search: null
+ endpoint_mode: vip
+ env: null
+ force_update: null
+ groups: null
+ healthcheck: null
+ healthcheck_disabled: null
+ hostname: null
+ hosts: null
+ image: "{{ docker_test_image_busybox }}"
+ labels: null
+ limit_cpu: null
+ limit_memory: null
+ log_driver: null
+ log_driver_options: null
+ mode: global
+ mounts: null
+ networks: null
+ secrets: null
+ stop_grace_period: null
+ stop_signal: null
+ placement_preferences: null
+ publish:
+ - {mode: null, protocol: tcp, published_port: 60001, target_port: 60001}
+ - {mode: null, protocol: udp, published_port: 60001, target_port: 60001}
+ read_only: null
+ replicas: null
+ replicas_max_per_node: null
+ reserve_cpu: null
+ reserve_memory: null
+ restart_policy: null
+ restart_policy_attempts: null
+ restart_policy_delay: null
+ restart_policy_window: null
+ rollback_config: null
+ tty: null
+ update_delay: null
+ update_failure_action: null
+ update_max_failure_ratio: null
+ update_monitor: null
+ update_order: null
+ update_parallelism: null
+ user: null
+ working_dir: null
+ init: null
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/aliases
new file mode 100644
index 00000000..fc581d54
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/3
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/main.yml
new file mode 100644
index 00000000..cd112a89
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- include_tasks: test_docker_swarm_service_info.yml
+ when: docker_py_version is version('2.0.0', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_swarm_service_info tests!"
+ when: not(docker_py_version is version('2.0.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml
new file mode 100644
index 00000000..ee191138
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_swarm_service_info/tasks/test_docker_swarm_service_info.yml
@@ -0,0 +1,85 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Generate service base name
+ set_fact:
+ service_base_name: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+
+- name: Registering service names
+ set_fact:
+ service_name: "{{ service_base_name ~ '-1' }}"
+
+- block:
+ - name: Make sure we're not already using Docker swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ - name: Try to get docker_swarm_service_info when docker is not running in swarm mode
+ docker_swarm_service_info:
+ name: "{{ service_name }}"
+ ignore_errors: true
+ register: output
+
+ - name: assert failure when called when swarm is not in use or not run on manager node
+ assert:
+ that:
+ - 'output is failed'
+ - 'output.msg == "Error running docker swarm module: must run on swarm manager node"'
+
+ - name: Create a Swarm cluster
+ docker_swarm:
+ state: present
+ advertise_addr: "{{ansible_default_ipv4.address | default('127.0.0.1')}}"
+ register: output
+
+ - name: Create services
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ image: "{{ docker_test_image_alpine }}"
+
+ - name: Try to get docker_swarm_service_info for a single service
+ docker_swarm_service_info:
+ name: "{{ service_name }}"
+ register: output
+
+  - name: assert reading service info
+ assert:
+ that:
+ - 'output.exists == true'
+ - 'output.service.ID is string'
+ - 'output.service.Spec.Name == service_name'
+
+ - name: Create random name
+ set_fact:
+ random_service_name: "{{ 'random-service-%0x' % ((2**32) | random) }}"
+
+ - name: Try to get docker_swarm_service_info using random service name as parameter
+ docker_swarm_service_info:
+ name: "{{ random_service_name }}"
+ register: output
+
+  - name: assert reading info of non-existing service
+ assert:
+ that:
+ - 'output.service is none'
+ - 'output.exists == false'
+
+ always:
+ - name: Remove services
+ docker_swarm_service:
+ name: "{{ service_name }}"
+ state: absent
+ ignore_errors: true
+
+ - name: Remove swarm
+ docker_swarm:
+ state: absent
+ force: true
+
+ when: docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_swarm_service_info tests!"
+ when: not(docker_py_version is version('2.0.2', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_volume/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_volume/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_volume/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_volume/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_volume/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_volume/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/main.yml
new file mode 100644
index 00000000..b356e561
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/main.yml
@@ -0,0 +1,34 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create random name prefix
+ set_fact:
+ name_prefix: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+ vnames: []
+
+- debug:
+ msg: "Using name prefix {{ name_prefix }}"
+
+- block:
+ - include_tasks: run-test.yml
+ with_fileglob:
+ - "tests/*.yml"
+
+ always:
+ - name: "Make sure all volumes are removed"
+ docker_volume:
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ vnames }}"
+
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_volume tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/run-test.yml b/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/run-test.yml
new file mode 100644
index 00000000..65853ddd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/run-test.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Loading tasks from {{ item }}"
+ include_tasks: "{{ item }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/tests/basic.yml b/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/tests/basic.yml
new file mode 100644
index 00000000..b65bbd9d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_volume/tasks/tests/basic.yml
@@ -0,0 +1,181 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Registering volume name
+ set_fact:
+ vname: "{{ name_prefix ~ '-basic' }}"
+- name: Registering container name
+ set_fact:
+ vnames: "{{ vnames + [vname] }}"
+
+####################################################################
+## basic ###########################################################
+####################################################################
+
+- name: Create a volume
+ docker_volume:
+ name: "{{ vname }}"
+ register: create_1
+
+- name: Create a volume (idempotency)
+ docker_volume:
+ name: "{{ vname }}"
+ register: create_2
+
+- name: "Create a volume (recreate: options-changed)"
+ docker_volume:
+ name: "{{ vname }}"
+ recreate: options-changed
+ register: create_3
+
+- name: "Create a volume (recreate: always)"
+ docker_volume:
+ name: "{{ vname }}"
+ recreate: always
+ register: create_4
+
+- name: Remove a volume
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
+ register: absent_1
+
+- name: Remove a volume (idempotency)
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
+ register: absent_2
+
+- assert:
+ that:
+ - create_1 is changed
+ - create_2 is not changed
+ - create_3 is not changed
+ - create_4 is changed
+ - absent_1 is changed
+ - absent_2 is not changed
+
+####################################################################
+## driver_options ##################################################
+####################################################################
+
+- name: Create a volume with options
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tempfs
+ device: tmpfs
+ o: size=100m,uid=1000
+ register: driver_options_1
+
+- name: Create a volume with options (idempotency)
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tempfs
+ device: tmpfs
+ o: size=100m,uid=1000
+ register: driver_options_2
+
+- name: Create a volume with options (changed)
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tempfs
+ device: tmpfs
+ o: size=200m,uid=1000
+ register: driver_options_3
+
+- name: "Create a volume with options (changed, recreate: options-changed)"
+ docker_volume:
+ name: "{{ vname }}"
+ driver: local
+ driver_options:
+ type: tempfs
+ device: tmpfs
+ o: size=200m,uid=1000
+ recreate: options-changed
+ register: driver_options_4
+
+- name: Cleanup
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
+
+- assert:
+ that:
+ - driver_options_1 is changed
+ - driver_options_2 is not changed
+ - driver_options_3 is not changed
+ - driver_options_4 is changed
+
+####################################################################
+## labels ##########################################################
+####################################################################
+
+- name: Create a volume with labels
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.1: hello
+ ansible.test.2: world
+ register: driver_labels_1
+
+- name: Create a volume with labels (idempotency)
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.2: world
+ ansible.test.1: hello
+ register: driver_labels_2
+
+- name: Create a volume with labels (less)
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.1: hello
+ register: driver_labels_3
+
+- name: "Create a volume with labels (less, recreate: options-changed)"
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.1: hello
+ recreate: options-changed
+ register: driver_labels_4
+
+- name: Create a volume with labels (more)
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ register: driver_labels_5
+
+- name: "Create a volume with labels (more, recreate: options-changed)"
+ docker_volume:
+ name: "{{ vname }}"
+ labels:
+ ansible.test.1: hello
+ ansible.test.3: ansible
+ recreate: options-changed
+ register: driver_labels_6
+
+- name: Cleanup
+ docker_volume:
+ name: "{{ vname }}"
+ state: absent
+
+- assert:
+ that:
+ - driver_labels_1 is changed
+ - driver_labels_2 is not changed
+ - driver_labels_3 is not changed
+ - driver_labels_4 is not changed
+ - driver_labels_5 is not changed
+ - driver_labels_6 is changed
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/aliases b/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/tasks/main.yml
new file mode 100644
index 00000000..09cb84d7
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/docker_volume_info/tasks/main.yml
@@ -0,0 +1,77 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- block:
+ - name: Create random volume name
+ set_fact:
+ cname: "{{ 'ansible-docker-test-%0x' % ((2**32) | random) }}"
+
+ - name: Make sure volume is not there
+ docker_volume:
+ name: "{{ cname }}"
+ state: absent
+
+ - name: Inspect a non-present volume
+ docker_volume_info:
+ name: "{{ cname }}"
+ register: result
+
+ - assert:
+ that:
+ - "not result.exists"
+ - "'volume' in result"
+ - "result.volume is none"
+
+ - name: Make sure volume exists
+ docker_volume:
+ name: "{{ cname }}"
+
+ - name: Inspect a present volume
+ docker_volume_info:
+ name: "{{ cname }}"
+ register: result
+ - name: Dump docker_volume_info result
+ debug: var=result
+
+ - name: "Comparison: use 'docker volume inspect'"
+ command: docker volume inspect "{{ cname }}"
+ register: docker_volume_inspect
+ ignore_errors: true
+ - block:
+ - set_fact:
+ docker_volume_inspect_result: "{{ docker_volume_inspect.stdout | from_json }}"
+ - name: Dump docker volume inspect result
+ debug: var=docker_volume_inspect_result
+ when: docker_volume_inspect is not failed
+
+ - name: Cleanup
+ docker_volume:
+ name: "{{ cname }}"
+ state: absent
+
+ - assert:
+ that:
+ - result.exists
+ - "'volume' in result"
+ - "result.volume"
+
+ - assert:
+ that:
+ - "result.volume == docker_volume_inspect_result[0]"
+ when: docker_volume_inspect is not failed
+ - assert:
+ that:
+ - "'is too new. Maximum supported API version is' in docker_volume_inspect.stderr"
+ when: docker_volume_inspect is failed
+
+ when: docker_api_version is version('1.25', '>=')
+
+- fail: msg="Too old docker / docker-py version to run docker_volume_info tests!"
+ when: not(docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
diff --git a/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/aliases b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/aliases
new file mode 100644
index 00000000..2e1acc0a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
diff --git a/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/files/nginx.conf b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/files/nginx.conf
new file mode 100644
index 00000000..50f92c29
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/files/nginx.conf
@@ -0,0 +1,50 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+user root;
+
+events {
+ worker_connections 16;
+}
+
+http {
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ error_log /dev/stdout info;
+ access_log /dev/stdout;
+
+ server {
+ listen *:5000 ssl;
+ server_name daemon-tls.ansible.com;
+ server_name_in_redirect on;
+
+ ssl_protocols TLSv1.2;
+ ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256';
+ ssl_ecdh_curve X25519:secp521r1:secp384r1;
+ ssl_prefer_server_ciphers on;
+ ssl_certificate /etc/nginx/cert.pem;
+ ssl_certificate_key /etc/nginx/cert.key;
+
+ location / {
+ proxy_pass http://unix:/var/run/docker.sock:/;
+
+ client_max_body_size 0;
+ chunked_transfer_encoding on;
+ }
+ }
+
+ server {
+ listen *:6000;
+ server_name daemon.ansible.com;
+ server_name_in_redirect on;
+
+ location / {
+ proxy_pass http://unix:/var/run/docker.sock:/;
+
+ client_max_body_size 0;
+ chunked_transfer_encoding on;
+ }
+ }
+}
diff --git a/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/filter_plugins/filter_attr.py b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/filter_plugins/filter_attr.py
new file mode 100644
index 00000000..f821b7e7
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/filter_plugins/filter_attr.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+def sanitize_host_info(data):
+ data = data.copy()
+ for key in ('SystemTime', 'NFd', 'NGoroutines', ):
+ data.pop(key, None)
+ return data
+
+
+class FilterModule:
+ def filters(self):
+ return {
+ 'sanitize_host_info': sanitize_host_info,
+ }
diff --git a/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/meta/main.yml
new file mode 100644
index 00000000..e7ff3d68
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/meta/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_openssl
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/tasks/main.yml
new file mode 100644
index 00000000..abbb0295
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/tasks/main.yml
@@ -0,0 +1,195 @@
+---
+# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Create random nginx frontend name
+ set_fact:
+ daemon_nginx_frontend: '{{ "ansible-docker-test-daemon-frontend-%0x" % ((2**32) | random) }}'
+
+- block:
+ - name: Create volume for config files
+ docker_volume:
+ name: '{{ daemon_nginx_frontend }}'
+ state: present
+
+ - name: Create container for nginx frontend for daemon
+ docker_container:
+ state: stopped
+ name: '{{ daemon_nginx_frontend }}'
+ image: "{{ docker_test_image_registry_nginx }}"
+ volumes:
+ - '{{ daemon_nginx_frontend }}:/etc/nginx/'
+ - '/var/run/docker.sock:/var/run/docker.sock'
+ network_mode: '{{ current_container_network_ip | default(omit, true) }}'
+ networks: >-
+ {{
+ [dict([['name', current_container_network_ip]])]
+ if current_container_network_ip not in ['', 'bridge'] else omit
+ }}
+ register: nginx_container
+
+ - name: Copy config files
+ copy:
+ src: "{{ item }}"
+ dest: "{{ remote_tmp_dir }}/{{ item }}"
+ mode: "0644"
+ loop:
+ - nginx.conf
+
+ - name: Copy static files into volume
+ docker_container_copy_into:
+ container: '{{ daemon_nginx_frontend }}'
+ path: '{{ remote_tmp_dir }}/{{ item }}'
+ container_path: '/etc/nginx/{{ item }}'
+ owner_id: 0
+ group_id: 0
+ loop:
+ - nginx.conf
+ register: can_copy_files
+ ignore_errors: true
+
+ - when: can_copy_files is not failed
+ block:
+
+ - name: Create private keys
+ community.crypto.openssl_privatekey:
+ path: '{{ remote_tmp_dir }}/{{ item }}.key'
+ type: ECC
+ curve: secp256r1
+ force: true
+ loop:
+ - cert
+ - ca
+
+ - name: Create CSR for CA certificate
+ community.crypto.openssl_csr:
+ path: '{{ remote_tmp_dir }}/ca.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/ca.key'
+ basic_constraints:
+ - 'CA:TRUE'
+ basic_constraints_critical: true
+
+ - name: Create CA certificate
+ community.crypto.x509_certificate:
+ path: '{{ remote_tmp_dir }}/ca.pem'
+ csr_path: '{{ remote_tmp_dir }}/ca.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/ca.key'
+ provider: selfsigned
+
+ - name: Create CSR for frontend certificate
+ community.crypto.openssl_csr:
+ path: '{{ remote_tmp_dir }}/cert.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/cert.key'
+ subject_alt_name:
+ - DNS:daemon-tls.ansible.com
+
+ - name: Create frontend certificate
+ community.crypto.x509_certificate:
+ path: '{{ remote_tmp_dir }}/cert.pem'
+ csr_path: '{{ remote_tmp_dir }}/cert.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/cert.key'
+ ownca_path: '{{ remote_tmp_dir }}/ca.pem'
+ ownca_privatekey_path: '{{ remote_tmp_dir }}/ca.key'
+ provider: ownca
+
+ - name: Copy dynamic files into volume
+ docker_container_copy_into:
+ container: '{{ daemon_nginx_frontend }}'
+ path: '{{ remote_tmp_dir }}/{{ item }}'
+ container_path: '/etc/nginx/{{ item }}'
+ owner_id: 0
+ group_id: 0
+ loop:
+ - ca.pem
+ - cert.pem
+ - cert.key
+
+ - name: Start nginx frontend for daemon
+ docker_container:
+ name: '{{ daemon_nginx_frontend }}'
+ state: started
+ register: nginx_container
+
+ - name: Output nginx container network settings
+ debug:
+ var: nginx_container.container.NetworkSettings
+
+ - name: Get proxied daemon URLs
+ set_fact:
+ docker_daemon_frontend_https: "https://{{ nginx_container.container.NetworkSettings.Networks[current_container_network_ip].IPAddress if current_container_network_ip else nginx_container.container.NetworkSettings.IPAddress }}:5000"
+ docker_daemon_frontend_http: "http://{{ nginx_container.container.NetworkSettings.Networks[current_container_network_ip].IPAddress if current_container_network_ip else nginx_container.container.NetworkSettings.IPAddress }}:6000"
+
+ - name: Wait for registry frontend
+ uri:
+ url: '{{ docker_daemon_frontend_http }}/version'
+ register: result
+ until: result is success
+ retries: 5
+ delay: 1
+
+ - name: Get docker daemon information directly
+ docker_host_info:
+ register: output_direct
+
+ - name: Show direct host info
+ debug:
+ var: output_direct.host_info | sanitize_host_info
+
+ - name: Get docker daemon information via HTTP
+ docker_host_info:
+ docker_host: '{{ docker_daemon_frontend_http }}'
+ register: output_http
+
+ - name: Show HTTP host info
+ debug:
+ var: output_http.host_info | sanitize_host_info
+
+ - name: Check that information matches
+ assert:
+ that:
+ - (output_direct.host_info | sanitize_host_info) == (output_http.host_info | sanitize_host_info)
+
+ - name: Get docker daemon information via HTTPS
+ docker_host_info:
+ docker_host: '{{ docker_daemon_frontend_https }}'
+ tls_hostname: daemon-tls.ansible.com
+ ca_cert: '{{ remote_tmp_dir }}/ca.pem'
+ tls: true
+ validate_certs: true
+ register: output_https
+
+ - name: Show HTTPS host info
+ debug:
+ var: output_https.host_info | sanitize_host_info
+
+ - name: Check that information matches
+ assert:
+ that:
+ - (output_direct.host_info | sanitize_host_info) == (output_https.host_info | sanitize_host_info)
+
+ always:
+ - command: docker logs {{ daemon_nginx_frontend }}
+ register: output
+ ignore_errors: true
+ - debug:
+ var: output.stdout_lines
+ ignore_errors: true
+
+ - name: Remove container
+ docker_container:
+ state: absent
+ name: '{{ daemon_nginx_frontend }}'
+ force_kill: true
+ ignore_errors: true
+
+ - name: Remove volume
+ docker_volume:
+ name: '{{ daemon_nginx_frontend }}'
+ state: absent
+ ignore_errors: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/vars/main.yml b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/vars/main.yml
new file mode 100644
index 00000000..e4eafc24
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/generic_connection_tests/vars/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_test_image_digest_v1: e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9
+docker_test_image_digest_v2: ee44b399df993016003bf5466bd3eeb221305e9d0fa831606bc7902d149c775b
+docker_test_image_digest_base: quay.io/ansible/docker-test-containers
+docker_test_image_hello_world: quay.io/ansible/docker-test-containers:hello-world
+docker_test_image_hello_world_base: quay.io/ansible/docker-test-containers
+docker_test_image_busybox: quay.io/ansible/docker-test-containers:busybox
+docker_test_image_alpine: quay.io/ansible/docker-test-containers:alpine3.8
+docker_test_image_alpine_different: quay.io/ansible/docker-test-containers:alpine3.7
+docker_test_image_registry_nginx: quay.io/ansible/docker-test-containers:nginx-alpine
+docker_test_image_registry: registry:2.6.1
diff --git a/ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/aliases b/ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/aliases
new file mode 100644
index 00000000..116ebd84
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/aliases
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
+needs/root
+skip/docker # we need a VM, and not a container
+skip/alpine # for some reason, SSH has problems with Alpine VMs
diff --git a/ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/meta/main.yml
new file mode 100644
index 00000000..6fdc1c8e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_paramiko
diff --git a/ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/tasks/main.yml
new file mode 100644
index 00000000..94554f71
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/generic_ssh_connection/tasks/main.yml
@@ -0,0 +1,90 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Get docker daemon information directly
+ docker_host_info:
+ register: output
+
+- name: Make sure we got information
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+
+- name: Show contents of ~/.ssh
+ command: ls -lah ~/.ssh
+ ignore_errors: true
+
+- name: Recover home directory on remote
+ command: echo $HOME
+ register: remote_home
+
+- name: Print remote home directory
+ debug:
+ var: remote_home.stdout
+
+- name: Create SSH config
+ copy:
+ dest: "{{ remote_home.stdout }}/.ssh/config"
+ mode: '0600'
+ content: |
+ Host localhost
+ User root
+ IdentityFile ~/.ssh/id_rsa
+
+- name: Get docker daemon information via ssh (paramiko) to localhost
+ docker_host_info:
+ docker_host: "ssh://root@localhost"
+ register: output
+ ignore_errors: true
+
+- name: Make sure we got information
+ assert:
+ that:
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+ when: output is succeeded or 'Failed to import the required Python library (paramiko)' not in output.msg
+ # Sometimes paramiko being installed isn't enough: importing it can fail
+ # due to 'ImportError: No module named x25519' when it executes
+ # `from cryptography.hazmat.primitives.asymmetric.x25519 import ...`.
+
+- name: Get docker daemon information via ssh (OpenSSH) to localhost
+ docker_host_info:
+ docker_host: "ssh://root@localhost"
+ use_ssh_client: true
+ register: output
+ ignore_errors: true
+
+- name: Make sure we got information
+ assert:
+ that:
+ - output is succeeded
+ - 'output.host_info.Name is string'
+ - 'output.containers is not defined'
+ - 'output.networks is not defined'
+ - 'output.volumes is not defined'
+ - 'output.images is not defined'
+ - 'output.disk_usage is not defined'
+ when: docker_py_version is version('4.4.0', '>=')
+
+- name: Make sure we got information
+ assert:
+ that:
+ - output is failed
+ - "'use_ssh_client=True requires Docker SDK for Python 4.4.0 or newer' in output.msg"
+ when: docker_py_version is version('4.4.0', '<')
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/aliases b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/aliases
new file mode 100644
index 00000000..1485e0b2
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/4
+destructive
+needs/root
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_1.docker.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_1.docker.yml
new file mode 100644
index 00000000..83fd6260
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_1.docker.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+plugin: community.docker.docker_containers
+docker_host: unix://var/run/docker.sock
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_2.docker.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_2.docker.yml
new file mode 100644
index 00000000..98349507
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/inventory_2.docker.yml
@@ -0,0 +1,11 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+plugin: community.docker.docker_containers
+docker_host: unix://var/run/docker.sock
+connection_type: ssh
+verbose_output: true
+add_legacy_groups: true
+default_ip: 1.2.3.4
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_cleanup.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_cleanup.yml
new file mode 100644
index 00000000..6f2a3b6c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_cleanup.yml
@@ -0,0 +1,26 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: true
+ tasks:
+ - name: remove docker containers
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: true
+ loop:
+ - ansible-docker-test-docker-inventory-container-1
+ - ansible-docker-test-docker-inventory-container-2
+
+    - name: remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name:
+ - docker
+ - docker-ce
+ - docker-ce-cli
+ state: absent
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_setup.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_setup.yml
new file mode 100644
index 00000000..0c1f3368
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/docker_setup.yml
@@ -0,0 +1,26 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ connection: local
+ vars:
+ docker_skip_cleanup: true
+
+ tasks:
+ - name: Setup docker
+ import_role:
+ name: setup_docker
+
+ - name: Start containers
+ docker_container:
+ name: "{{ item.name }}"
+ image: "{{ docker_test_image_alpine }}"
+ state: started
+ command: '/bin/sh -c "sleep 10m"'
+ published_ports:
+ - 22/tcp
+ loop:
+ - name: ansible-docker-test-docker-inventory-container-1
+ - name: ansible-docker-test-docker-inventory-container-2
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_1.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_1.yml
new file mode 100644
index 00000000..c0f28f57
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_1.yml
@@ -0,0 +1,40 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: false
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Make sure that the default groups are there, but no others
+ assert:
+ that:
+ - groups.all | length >= 2
+ - groups.ungrouped | length >= 2
+ - groups | length == 2
+
+- hosts: all
+ gather_facts: false
+ tasks:
+ - when:
+ # When the integration tests are run inside a docker container, there
+ # will be other containers.
+ - inventory_hostname.startswith('ansible-docker-test-docker-inventory-container-')
+ block:
+
+ - name: Run raw command
+ raw: ls /
+ register: output
+
+ - name: Check whether we have some directories we expect in the output
+ assert:
+ that:
+ - "'bin' in output.stdout_lines"
+ - "'dev' in output.stdout_lines"
+ - "'lib' in output.stdout_lines"
+ - "'proc' in output.stdout_lines"
+ - "'sys' in output.stdout_lines"
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_2.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_2.yml
new file mode 100644
index 00000000..28f8dc00
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/playbooks/test_inventory_2.yml
@@ -0,0 +1,49 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: false
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Load variables
+ include_vars: ../../setup_docker/vars/main.yml
+ - name: Make sure that the expected groups are there
+ assert:
+ that:
+ - groups.all | length >= 2
+ - groups.ungrouped | length >= 0
+ - groups.running | length >= 2
+ - groups.stopped | length >= 0
+ - groups['image_' ~ docker_test_image_alpine] | length == 2
+ - groups['ansible-docker-test-docker-inventory-container-1'] | length == 1
+ - groups['ansible-docker-test-docker-inventory-container-2'] | length == 1
+ - groups['unix://var/run/docker.sock'] | length >= 2
+ - groups | length >= 12
+ # The four additional groups are IDs and short IDs of the containers.
+ # When the integration tests are run inside a docker container, there
+ # will be more groups (for the additional container(s)).
+
+- hosts: all
+ # We don't really want to connect to the nodes, since we have no SSH daemon running on them
+ connection: local
+ vars:
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ gather_facts: false
+ tasks:
+ - name: Show all variables
+ debug:
+ var: hostvars[inventory_hostname]
+ - name: Make sure SSH is set up
+ assert:
+ that:
+ - ansible_ssh_host == '1.2.3.4'
+ - ansible_ssh_port == docker_networksettings.Ports['22/tcp'][0].HostPort
+ when:
+ # When the integration tests are run inside a docker container, there
+ # will be other containers.
+ - inventory_hostname.startswith('ansible-docker-test-docker-inventory-container-')
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/runme.sh b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/runme.sh
new file mode 100755
index 00000000..acc1d5f4
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_containers/runme.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
+
+set -euo pipefail
+
+cleanup() {
+ echo "Cleanup"
+ ansible-playbook playbooks/docker_cleanup.yml
+ echo "Done"
+}
+
+trap cleanup INT TERM EXIT
+
+echo "Setup"
+ANSIBLE_ROLES_PATH=.. ansible-playbook playbooks/docker_setup.yml
+
+echo "Test docker_containers inventory 1"
+ansible-playbook -i inventory_1.docker.yml playbooks/test_inventory_1.yml
+
+echo "Test docker_containers inventory 2"
+ansible-playbook -i inventory_2.docker.yml playbooks/test_inventory_2.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/aliases b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/aliases
new file mode 100644
index 00000000..956df459
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/aliases
@@ -0,0 +1,8 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/2
+disabled
+destructive
+needs/root
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/docker-machine b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/docker-machine
new file mode 100644
index 00000000..aad9e5fe
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/docker-machine
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# Mock Docker Machine wrapper for testing purposes
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+[ "$MOCK_ERROR_IN" == "$1" ] && echo >&2 "Mock Docker Machine error" && exit 1
+case "$1" in
+    env)
+        cat <<'EOF'
+export DOCKER_TLS_VERIFY="1"
+export DOCKER_HOST="tcp://134.209.204.160:2376"
+export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
+export DOCKER_MACHINE_NAME="routinator"
+# Run this command to configure your shell:
+# eval $(docker-machine env --shell=bash routinator)
+EOF
+        ;;
+
+    *)
+        # Forward all arguments verbatim; "$@" (not $*) preserves each
+        # argument as a separate word even if it contains spaces or globs.
+        /usr/bin/docker-machine "$@"
+        ;;
+esac
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml
new file mode 100644
index 00000000..f8fc6b0c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_1.docker_machine.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+plugin: community.docker.docker_machine
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml
new file mode 100644
index 00000000..817c5578
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_2.docker_machine.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+plugin: community.docker.docker_machine
+daemon_env: require
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml
new file mode 100644
index 00000000..95a6e827
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/inventory_3.docker_machine.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+plugin: community.docker.docker_machine
+daemon_env: optional
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml
new file mode 100644
index 00000000..3c6c1367
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/pre-setup.yml
@@ -0,0 +1,22 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ connection: local
+ tasks:
+ - name: Setup docker
+ include_role:
+ name: setup_docker
+
+ # There seems to be no better way to install docker-machine. At least I couldn't find any packages for RHEL7/8.
+ - name: Download docker-machine binary
+ vars:
+ docker_machine_version: "0.16.1"
+ get_url:
+ url: "https://github.com/docker/machine/releases/download/v{{ docker_machine_version }}/docker-machine-{{ ansible_system }}-{{ ansible_userspace_architecture }}"
+ dest: /tmp/docker-machine
+ - name: Install docker-machine binary
+ command: install /tmp/docker-machine /usr/bin/docker-machine
+ become: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/setup.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/setup.yml
new file mode 100644
index 00000000..02d9ad4a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/setup.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ connection: local
+ tasks:
+ - name: Request Docker Machine to use this machine as a generic VM
+ command: "docker-machine --debug create \
+ --driver generic \
+ --generic-ip-address=localhost \
+ --generic-ssh-key {{ lookup('env', 'HOME') }}/.ssh/id_rsa \
+ --generic-ssh-user root \
+ vm"
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml
new file mode 100644
index 00000000..8fb6fbdf
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/teardown.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ connection: local
+ tasks:
+ - name: Request Docker Machine to remove this machine as a generic VM
+ command: "docker-machine rm vm -f"
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml
new file mode 100644
index 00000000..fb58718d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/playbooks/test_inventory_1.yml
@@ -0,0 +1,55 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ gather_facts: false
+ tasks:
+ - name: sanity check Docker Machine output
+ vars:
+ dm_ls_format: !unsafe '{{.Name}} | {{.DriverName}} | {{.State}} | {{.URL}} | {{.Error}}'
+ success_regex: "^vm | [^|]+ | Running | tcp://.+ |$"
+ command: docker-machine ls --format '{{ dm_ls_format }}'
+ register: result
+ failed_when: result.rc != 0 or result.stdout is not match(success_regex)
+
+ - name: verify Docker Machine ip
+ command: docker-machine ip vm
+ register: result
+ failed_when: result.rc != 0 or result.stdout != hostvars['vm'].ansible_host
+
+ - name: verify Docker Machine env
+ command: docker-machine env --shell=sh vm
+ register: result
+
+ - debug: var=result.stdout
+
+ - assert:
+ that:
+ - "'DOCKER_TLS_VERIFY=\"{{ hostvars['vm'].dm_DOCKER_TLS_VERIFY }}\"' in result.stdout"
+ - "'DOCKER_HOST=\"{{ hostvars['vm'].dm_DOCKER_HOST }}\"' in result.stdout"
+ - "'DOCKER_CERT_PATH=\"{{ hostvars['vm'].dm_DOCKER_CERT_PATH }}\"' in result.stdout"
+ - "'DOCKER_MACHINE_NAME=\"{{ hostvars['vm'].dm_DOCKER_MACHINE_NAME }}\"' in result.stdout"
+
+- hosts: vm
+ gather_facts: false
+ tasks:
+ - name: do something to verify that accept-new ssh setting was applied by the docker-machine inventory plugin
+ raw: uname -a
+ register: result
+
+ - debug: var=result.stdout
+
+- hosts: 127.0.0.1
+ gather_facts: false
+ environment:
+ DOCKER_CERT_PATH: "{{ hostvars['vm'].dm_DOCKER_CERT_PATH }}"
+ DOCKER_HOST: "{{ hostvars['vm'].dm_DOCKER_HOST }}"
+ DOCKER_MACHINE_NAME: "{{ hostvars['vm'].dm_DOCKER_MACHINE_NAME }}"
+ DOCKER_TLS_VERIFY: "{{ hostvars['vm'].dm_DOCKER_TLS_VERIFY }}"
+ tasks:
+ - name: run a Docker container on the target Docker Machine host to verify that Docker daemon connection settings from the docker-machine inventory plugin work as expected
+ docker_container:
+ name: test
+ image: "{{ docker_test_image_hello_world }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/runme.sh b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/runme.sh
new file mode 100755
index 00000000..b39a08c4
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/runme.sh
@@ -0,0 +1,71 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+SCRIPT_DIR=$(dirname "$0")
+
+echo "Who am I: $(whoami)"
+echo "Home: ${HOME}"
+echo "PWD: $(pwd)"
+echo "Script dir: ${SCRIPT_DIR}"
+
+# restrict Ansible just to our inventory plugin, to prevent inventory data being matched by the test but being provided
+# by some other dynamic inventory provider
+export ANSIBLE_INVENTORY_ENABLED=docker_machine
+
+[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
+
+set -euo pipefail
+
+SAVED_PATH="$PATH"
+
+cleanup() {
+ PATH="${SAVED_PATH}"
+ echo "Cleanup"
+ ansible-playbook -i teardown.docker_machine.yml playbooks/teardown.yml
+ echo "Done"
+}
+
+trap cleanup INT TERM EXIT
+
+echo "Pre-setup (install docker, docker-machine)"
+ANSIBLE_ROLES_PATH=.. ansible-playbook playbooks/pre-setup.yml
+
+echo "Print docker-machine version"
+docker-machine --version
+
+echo "Check preconditions"
+# Host should NOT be known to Ansible before the test starts
+ansible-inventory -i inventory_1.docker_machine.yml --host vm >/dev/null && exit 1
+
+echo "Test that the docker_machine inventory plugin is being loaded"
+ANSIBLE_DEBUG=yes ansible-inventory -i inventory_1.docker_machine.yml --list | grep -F "Loading InventoryModule 'docker_machine'"
+
+echo "Setup"
+ansible-playbook playbooks/setup.yml
+
+echo "Test docker_machine inventory 1"
+ansible-playbook -i inventory_1.docker_machine.yml playbooks/test_inventory_1.yml
+
+echo "Activate Docker Machine mock"
+PATH=${SCRIPT_DIR}:$PATH
+
+echo "Test docker_machine inventory 2: daemon_env=require daemon env success=yes"
+ansible-inventory -i inventory_2.docker_machine.yml --list
+
+echo "Test docker_machine inventory 2: daemon_env=require daemon env success=no"
+export MOCK_ERROR_IN=env
+ansible-inventory -i inventory_2.docker_machine.yml --list
+unset MOCK_ERROR_IN
+
+echo "Test docker_machine inventory 3: daemon_env=optional daemon env success=yes"
+ansible-inventory -i inventory_3.docker_machine.yml --list
+
+echo "Test docker_machine inventory 3: daemon_env=optional daemon env success=no"
+export MOCK_ERROR_IN=env
+ansible-inventory -i inventory_3.docker_machine.yml --list
+unset MOCK_ERROR_IN
+
+echo "Deactivate Docker Machine mock"
+PATH="${SAVED_PATH}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml
new file mode 100644
index 00000000..d1ce95ce
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_machine/teardown.docker_machine.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+plugin: community.docker.docker_machine
+daemon_env: skip
+running_required: false
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/aliases b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/aliases
new file mode 100644
index 00000000..50e0e5f3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/aliases
@@ -0,0 +1,7 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+azp/2
+destructive
+needs/root
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml
new file mode 100644
index 00000000..4371f6c1
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_1.docker_swarm.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+plugin: community.docker.docker_swarm
+docker_host: unix://var/run/docker.sock
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml
new file mode 100644
index 00000000..35fe21bf
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/inventory_2.docker_swarm.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+plugin: community.docker.docker_swarm
+docker_host: unix://var/run/docker.sock
+verbose_output: false
+include_host_uri: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/meta/main.yml
new file mode 100644
index 00000000..5769ff1c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml
new file mode 100644
index 00000000..4039a6bd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_cleanup.yml
@@ -0,0 +1,22 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ connection: local
+ gather_facts: true
+ tasks:
+ - name: Make sure swarm is removed
+ docker_swarm:
+ state: absent
+ force: true
+
+  - name: Remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name:
+ - docker
+ - docker-ce
+ - docker-ce-cli
+ state: absent
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml
new file mode 100644
index 00000000..1ae4c63f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/swarm_setup.yml
@@ -0,0 +1,19 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ connection: local
+ vars:
+ docker_skip_cleanup: true
+
+ tasks:
+ - name: Setup docker
+ import_role:
+ name: setup_docker
+
+ - name: Create a Swarm cluster
+ community.docker.docker_swarm:
+ state: present
+    advertise_addr: "{{ ansible_default_ipv4.address | default('127.0.0.1') }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml
new file mode 100644
index 00000000..77fcc371
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_1.yml
@@ -0,0 +1,62 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: false
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Make sure docker_swarm groups are there
+ assert:
+ that:
+ - groups.all | length > 0
+ - groups.leader | length == 1
+ - groups.manager | length > 0
+ - groups.worker | length >= 0
+ - groups.nonleaders | length >= 0
+
+- hosts: all
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ vars:
+ # for some reason, Ansible can't find the Python interpreter when connecting to the nodes,
+ # which is in fact just localhost in disguise. That's why we use ansible_playbook_python.
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ tasks:
+ - name: Check for groups
+ assert:
+ that:
+ - "groups.manager | length > 0"
+ - "groups.worker | length >= 0"
+ - "groups.leader | length == 1"
+ run_once: true
+
+ - name: List manager group
+ debug:
+ var: groups.manager
+ run_once: true
+
+ - name: List worker group
+ debug:
+ var: groups.worker
+ run_once: true
+
+ - name: List leader group
+ debug:
+ var: groups.leader
+ run_once: true
+
+ - name: Print ansible_host per host
+ debug:
+ var: ansible_host
+
+ - name: Make sure docker_swarm_node_attributes is available
+ assert:
+ that:
+ - docker_swarm_node_attributes is not undefined
+ - name: Print docker_swarm_node_attributes per host
+ debug:
+ var: docker_swarm_node_attributes
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml
new file mode 100644
index 00000000..091b891a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/playbooks/test_inventory_2.yml
@@ -0,0 +1,39 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- hosts: 127.0.0.1
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ gather_facts: false
+ tasks:
+ - name: Show all groups
+ debug:
+ var: groups
+ - name: Make sure docker_swarm groups are there
+ assert:
+ that:
+ - groups.all | length > 0
+ - groups.leader | length == 1
+ - groups.manager | length > 0
+ - groups.worker | length >= 0
+ - groups.nonleaders | length >= 0
+
+- hosts: all
+ connection: local # otherwise Ansible will complain that it cannot connect via ssh to 127.0.0.1:22
+ vars:
+ # for some reason, Ansible can't find the Python interpreter when connecting to the nodes,
+ # which is in fact just localhost in disguise. That's why we use ansible_playbook_python.
+ ansible_python_interpreter: "{{ ansible_playbook_python }}"
+ tasks:
+ - name: Make sure docker_swarm_node_attributes is not available
+ assert:
+ that:
+ - docker_swarm_node_attributes is undefined
+ - name: Make sure ansible_host_uri is available
+ assert:
+ that:
+ - ansible_host_uri is defined
+ - name: Print ansible_host_uri
+ debug:
+ var: ansible_host_uri
diff --git a/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/runme.sh b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/runme.sh
new file mode 100755
index 00000000..746b8592
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/inventory_docker_swarm/runme.sh
@@ -0,0 +1,25 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+[[ -n "$DEBUG" || -n "$ANSIBLE_DEBUG" ]] && set -x
+
+set -euo pipefail
+
+cleanup() {
+ echo "Cleanup"
+ ansible-playbook playbooks/swarm_cleanup.yml
+ echo "Done"
+}
+
+trap cleanup INT TERM EXIT
+
+echo "Setup"
+ANSIBLE_ROLES_PATH=.. ansible-playbook playbooks/swarm_setup.yml
+
+echo "Test docker_swarm inventory 1"
+ansible-playbook -i inventory_1.docker_swarm.yml playbooks/test_inventory_1.yml
+
+echo "Test docker_swarm inventory 2"
+ansible-playbook -i inventory_2.docker_swarm.yml playbooks/test_inventory_2.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/aliases b/ansible_collections/community/docker/tests/integration/targets/setup_docker/aliases
new file mode 100644
index 00000000..0a430dff
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+needs/target/setup_epel
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/defaults/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/defaults/main.yml
new file mode 100644
index 00000000..0509fe0e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/defaults/main.yml
@@ -0,0 +1,23 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_cli_version: '0.0'
+docker_api_version: '0.0'
+docker_py_version: '0.0'
+docker_skip_cleanup: true
+docker_prereq_packages: []
+docker_packages:
+ - docker-ce
+docker_cli_packages:
+ - docker-ce-cli
+
+docker_pip_extra_packages: []
+docker_pip_package: docker
+docker_pip_package_limit: ''
+
+docker_cleanup_packages:
+ - docker
+ - docker-ce
+ - docker-ce-cli
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/handlers/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/handlers/main.yml
new file mode 100644
index 00000000..96ca226c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/handlers/main.yml
@@ -0,0 +1,19 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Remove pip packages
+ pip:
+ state: present
+ name: "{{ [docker_pip_package] | union(docker_pip_extra_packages) }}"
+ listen: cleanup docker
+ when: not docker_skip_cleanup | bool
+
+- name: Remove docker packages
+ action: "{{ ansible_facts.pkg_mgr }}"
+ args:
+ name: "{{ docker_cleanup_packages }}"
+ state: absent
+ listen: cleanup docker
+ when: not docker_skip_cleanup | bool
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/meta/main.yml
new file mode 100644
index 00000000..d4a5c7d0
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_constraints
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Alpine.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Alpine.yml
new file mode 100644
index 00000000..64f6eb34
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Alpine.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker
+ apk:
+ name: docker
+ update_cache: true
+ notify: cleanup docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Archlinux.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Archlinux.yml
new file mode 100644
index 00000000..3a67ff2b
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Archlinux.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker
+ community.general.pacman:
+ name: docker
+ update_cache: true
+ notify: cleanup docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Debian.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Debian.yml
new file mode 100644
index 00000000..0b5bdcb1
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Debian.yml
@@ -0,0 +1,50 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Get OS version
+ shell: uname -r
+ register: os_version
+
+- name: Install pre-reqs
+ apt:
+ name: '{{ docker_prereq_packages }}'
+ state: present
+ update_cache: true
+ notify: cleanup docker
+
+- name: Add gpg key
+ shell: curl -fsSL https://download.docker.com/linux/{{ ansible_distribution | lower }}/gpg >key && apt-key add key
+
+- name: Add Docker repo
+ apt_repository:
+ repo: deb [arch=amd64] https://download.docker.com/linux/{{ ansible_distribution | lower }} {{ ansible_distribution_release }} stable
+ state: present
+
+- block:
+ - name: Prevent service restart
+ copy:
+ content: exit 101
+ dest: /usr/sbin/policy-rc.d
+ backup: true
+ mode: '0755'
+ register: policy_rc_d
+
+ - name: Install Docker CE
+ apt:
+ name: '{{ docker_packages if needs_docker_daemon else docker_cli_packages }}'
+ state: present
+
+ always:
+ - name: Restore /usr/sbin/policy-rc.d (if needed)
+ command: mv {{ policy_rc_d.backup_file }} /usr/sbin/policy-rc.d
+ when:
+ - '"backup_file" in policy_rc_d'
+
+ - name: Remove /usr/sbin/policy-rc.d (if needed)
+ file:
+ path: /usr/sbin/policy-rc.d
+ state: absent
+ when:
+ - '"backup_file" not in policy_rc_d'
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Fedora.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Fedora.yml
new file mode 100644
index 00000000..039751a7
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Fedora.yml
@@ -0,0 +1,28 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Import GPG key
+ rpm_key:
+ key: https://download.docker.com/linux/fedora/gpg
+ state: present
+
+- name: Add repository
+ yum_repository:
+ file: docker-ce
+ name: docker-ce-stable
+ description: Docker CE Stable - $basearch
+ baseurl: https://download.docker.com/linux/fedora/{{ 31 if ansible_facts.distribution_major_version|int > 31 else '$releasever' }}/$basearch/stable
+ enabled: true
+ gpgcheck: true
+
+- name: Update cache
+ command: dnf makecache
+
+- name: Install docker
+ dnf:
+ name: "{{ docker_packages if needs_docker_daemon else docker_cli_packages }}"
+ state: present
+ enablerepo: docker-ce-test
+ notify: cleanup docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-7.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-7.yml
new file mode 100644
index 00000000..87728ec6
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-7.yml
@@ -0,0 +1,46 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# The RHEL extras repository must be enabled to provide the container-selinux package.
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
+
+- name: Install Docker pre-reqs
+ yum:
+ name: "{{ docker_prereq_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Install epel repo which is missing on rhel-7 and is needed for pigz (needed for docker-ce 18)
+ include_role:
+ name: setup_epel
+
+- name: Enable extras repository for RHEL on AWS
+ # RHEL 7.6 uses REGION-rhel-server-extras and RHEL 7.7+ use rhel-7-server-rhui-extras-rpms
+ command: yum-config-manager --enable REGION-rhel-server-extras rhel-7-server-rhui-extras-rpms
+
+# Docker broke their .repo file, so we set it up ourselves
+- name: Set-up repository
+ yum_repository:
+ name: docker-ce
+ description: docker-ce
+ baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable
+ gpgcheck: true
+ gpgkey: https://download.docker.com/linux/centos/gpg
+
+- name: Update cache
+ command: yum -y makecache fast
+
+- name: Install docker
+ yum:
+ name: "{{ docker_packages if needs_docker_daemon else docker_cli_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Make sure the docker daemon is running (failure expected inside docker container)
+ service:
+ name: docker
+ state: started
+ ignore_errors: "{{ ansible_virtualization_type in ['docker', 'container', 'containerd'] }}"
+ when: needs_docker_daemon
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-8.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-8.yml
new file mode 100644
index 00000000..1e259d97
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-8.yml
@@ -0,0 +1,39 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# The RHEL extras repository must be enabled to provide the container-selinux package.
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
+
+- name: Install Docker pre-reqs
+ dnf:
+ name: "{{ docker_prereq_packages }}"
+ state: present
+ notify: cleanup docker
+ register: result
+ until: result is success
+ retries: 10
+ delay: 2
+
+# Docker broke their .repo file, so we set it up ourselves
+- name: Set-up repository
+ yum_repository:
+ name: docker-ce
+ description: docker-ce
+ baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable
+ gpgcheck: true
+ gpgkey: https://download.docker.com/linux/centos/gpg
+
+- name: Install docker
+ dnf:
+ name: "{{ docker_packages if needs_docker_daemon else docker_cli_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Make sure the docker daemon is running (failure expected inside docker container)
+ service:
+ name: docker
+ state: started
+ ignore_errors: "{{ ansible_virtualization_type in ['docker', 'container', 'containerd'] }}"
+ when: needs_docker_daemon
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-9.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-9.yml
new file mode 100644
index 00000000..1e259d97
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/RedHat-9.yml
@@ -0,0 +1,39 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# The RHEL extras repository must be enabled to provide the container-selinux package.
+# See: https://docs.docker.com/engine/installation/linux/docker-ee/rhel/#install-using-the-repository
+
+- name: Install Docker pre-reqs
+ dnf:
+ name: "{{ docker_prereq_packages }}"
+ state: present
+ notify: cleanup docker
+ register: result
+ until: result is success
+ retries: 10
+ delay: 2
+
+# Docker broke their .repo file, so we set it up ourselves
+- name: Set-up repository
+ yum_repository:
+ name: docker-ce
+ description: docker-ce
+ baseurl: https://download.docker.com/linux/centos/{{ ansible_facts.distribution_major_version }}/$basearch/stable
+ gpgcheck: true
+ gpgkey: https://download.docker.com/linux/centos/gpg
+
+- name: Install docker
+ dnf:
+ name: "{{ docker_packages if needs_docker_daemon else docker_cli_packages }}"
+ state: present
+ notify: cleanup docker
+
+- name: Make sure the docker daemon is running (failure expected inside docker container)
+ service:
+ name: docker
+ state: started
+ ignore_errors: "{{ ansible_virtualization_type in ['docker', 'container', 'containerd'] }}"
+ when: needs_docker_daemon
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Suse.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Suse.yml
new file mode 100644
index 00000000..18974cb2
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/Suse.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker 17
+ community.general.zypper:
+ name: "{{ docker_packages if needs_docker_daemon else docker_cli_packages }}"
+ force: true
+ disable_gpg_check: true
+ update_cache: true
+ notify: cleanup docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/main.yml
new file mode 100644
index 00000000..f5f4e2ef
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/tasks/main.yml
@@ -0,0 +1,179 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Setup Docker
+ when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ block:
+ - name: Detect whether we are running inside a container
+ current_container_facts:
+
+ - name: Look for marker whether Docker was already set up
+ stat:
+ path: /root/community.docker-docker-is-set-up
+ register: docker_setup_marker
+
+ - when: not docker_setup_marker.stat.exists
+ block:
+ - name: Determine whether Docker Daemon needs to be installed
+ set_fact:
+ needs_docker_daemon: '{{ not ansible_module_running_in_container }}'
+
+ - name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - default.yml
+ paths:
+ - "{{ role_path }}/vars"
+
+ - name: Include distribution specific tasks
+ include_tasks: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ paths:
+ - "{{ role_path }}/tasks"
+
+ - name: Make sure that docker is running
+ service:
+ name: docker
+ state: started
+ when: not ansible_module_running_in_container
+
+ - name: Set marker that Docker was already set up
+ file:
+ path: /root/community.docker-docker-is-set-up
+ state: touch
+ when: docker_skip_cleanup
+
+ # Detect docker API version
+ - name: Check Docker API version
+ command: "docker version -f {% raw %}'{{(index .Server.Components 0).Details.ApiVersion}}'{% endraw %}"
+ register: docker_api_version_stdout
+ ignore_errors: true
+
+ - name: Limit docker pypi package version to < 4.3.0
+ set_fact:
+ docker_pip_package_limit: '<4.3.0'
+ when: (docker_api_version_stdout.stdout | default('0.0')) is version('1.39', '<')
+
+ - name: Install/upgrade Python requirements
+ pip:
+ name: "{{ [docker_pip_package ~ docker_pip_package_limit] + docker_pip_extra_packages }}"
+ extra_args: "-c {{ remote_constraints }}"
+ state: "{{ 'latest' if force_docker_sdk_for_python_pypi | default(false) else 'present' }}"
+ notify: cleanup docker
+
+ # Detect docker CLI, API and docker-py versions
+ - name: Check Docker CLI version
+ command: "docker version -f {% raw %}'{{.Client.Version}}'{% endraw %}"
+ register: docker_cli_version_stdout
+ ignore_errors: true
+
+ - name: Check Docker API version
+ command: "{{ ansible_python.executable }} -c 'import docker; print(docker.from_env().version()[\"ApiVersion\"])'"
+ register: docker_api_version_stdout
+ ignore_errors: true
+
+ - name: Check docker-py API version
+ command: "{{ ansible_python.executable }} -c 'import docker; print(docker.__version__)'"
+ register: docker_py_version_stdout
+ ignore_errors: true
+
+ - set_fact:
+ docker_cli_version: "{{ docker_cli_version_stdout.stdout | default('0.0') }}"
+ docker_api_version: "{{ docker_api_version_stdout.stdout | default('0.0') }}"
+ docker_py_version: "{{ docker_py_version_stdout.stdout | default('0.0') }}"
+
+ - debug:
+ msg: "Docker CLI version: {{ docker_cli_version }}; Docker API version: {{ docker_api_version }}; docker-py library version: {{ docker_py_version }}"
+
+ - block:
+ # Cleanup docker daemon
+ - command: 'docker ps --no-trunc --format {% raw %}"{{.Names}}"{% endraw %}'
+
+ - name: "Remove all ansible-docker-test-* docker containers"
+ shell: 'docker ps --no-trunc --format {% raw %}"{{.Names}}"{% endraw %} | grep "^ansible-docker-test-" | xargs -r docker rm -f'
+ register: docker_containers
+ retries: 3
+ delay: 3
+ until: docker_containers is success
+ ignore_errors: true
+
+ - name: "Remove all ansible-docker-test-* docker volumes"
+ shell: 'docker volume ls --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-docker-test-" | xargs -r docker volume rm -f'
+ register: docker_volumes
+ ignore_errors: true
+
+ - name: "Remove all ansible-docker-test-* docker networks"
+ shell: 'docker network ls --no-trunc --format {% raw %}"{{.Name}}"{% endraw %} | grep "^ansible-docker-test-" | xargs -r docker network rm'
+ register: docker_networks
+ ignore_errors: true
+
+ - name: Cleaned docker resources
+ debug:
+ var: docker_resources
+ vars:
+ docker_resources:
+ containers: "{{ docker_containers.stdout_lines | default([]) }}"
+ volumes: "{{ docker_volumes.stdout_lines | default([]) }}"
+ networks: "{{ docker_networks.stdout_lines | default([]) }}"
+
+ # List all existing docker resources
+ - name: List all docker containers
+ command: docker ps --no-trunc -a
+ register: docker_containers
+ ignore_errors: true
+
+ - name: List all docker volumes
+ command: docker volume ls
+ register: docker_volumes
+ ignore_errors: true
+
+ - name: List all docker networks
+ command: docker network ls --no-trunc
+ register: docker_networks
+ ignore_errors: true
+
+ - name: List all docker images
+ command: docker images --no-trunc -a
+ register: docker_images
+ ignore_errors: true
+
+ - name: Still existing docker resources
+ debug:
+ var: docker_resources
+ vars:
+ docker_resources:
+ containers: "{{ docker_containers.stdout_lines | default([]) }}"
+ volumes: "{{ docker_volumes.stdout_lines | default([]) }}"
+ networks: "{{ docker_networks.stdout_lines | default([]) }}"
+ images: "{{ docker_images.stdout_lines | default([]) }}"
+
+ when: docker_cli_version is version('0.0', '>')
+
+ - name: Inspect current container
+ docker_container_info:
+ name: "{{ ansible_module_container_id }}"
+ register: current_container_info
+ when: ansible_module_running_in_container
+
+ - name: Determine network name
+ set_fact:
+ current_container_network_ip: "{{ (current_container_info.container.NetworkSettings.Networks | dictsort)[0].0 | default('') if ansible_module_running_in_container else '' }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Debian.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Debian.yml
new file mode 100644
index 00000000..78f7555d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Debian.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_prereq_packages:
+ - apt-transport-https
+ - ca-certificates
+ - curl
+ - software-properties-common
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Fedora.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Fedora.yml
new file mode 100644
index 00000000..f55df21f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Fedora.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-7.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-7.yml
new file mode 100644
index 00000000..b1e28987
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-7.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_prereq_packages:
+ - yum-utils
+ - device-mapper-persistent-data
+ - lvm2
+ - libseccomp
+
+docker_pip_extra_packages:
+ - requests==2.6.0
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-8.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-8.yml
new file mode 100644
index 00000000..7609400a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-8.yml
@@ -0,0 +1,17 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_prereq_packages:
+ - yum-utils
+ - device-mapper-persistent-data
+ - lvm2
+ - libseccomp
+ - iptables
+
+docker_packages:
+ - docker-ce-19.03.13
+ - docker-ce-cli-19.03.13
+docker_cli_packages:
+ - docker-ce-cli-19.03.13
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-9.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-9.yml
new file mode 100644
index 00000000..04fcae72
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/RedHat-9.yml
@@ -0,0 +1,17 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_prereq_packages:
+ - yum-utils
+ - device-mapper-persistent-data
+ - lvm2
+ - libseccomp
+ - iptables
+
+docker_packages:
+ - docker-ce # -19.03.13
+ - docker-ce-cli # -19.03.13
+docker_cli_packages:
+ - docker-ce-cli # -19.03.13
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Suse.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Suse.yml
new file mode 100644
index 00000000..ab71ef5d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Suse.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_packages:
+ - docker>=17
+
+# OpenSUSE 15 does not seem to have docker-client (https://software.opensuse.org/package/docker-client)
+# or any other Docker CLI-only package
+docker_cli_packages:
+ - docker>=17
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Ubuntu-14.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Ubuntu-14.yml
new file mode 100644
index 00000000..d7c82455
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Ubuntu-14.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_pip_extra_packages:
+ # Installing requests >=2.12.0 on Ubuntu 14.04 breaks certificate validation. We restrict to an older version
+  # to ensure our get_url tests work out fine. This is only an issue if pyOpenSSL is also installed.
+ # Not sure why RHEL7 needs this specific version
+ - requests==2.6.0
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Ubuntu-22.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Ubuntu-22.yml
new file mode 100644
index 00000000..436eb2d6
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/Ubuntu-22.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_prereq_packages:
+ - ca-certificates
+ - curl
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/default.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/default.yml
new file mode 100644
index 00000000..f55df21f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/default.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.env b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.env
new file mode 100644
index 00000000..52327147
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.env
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Docker images for runme.sh based tests
+DOCKER_TEST_IMAGE_PYTHON3=python:3-alpine
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.yml
new file mode 100644
index 00000000..e4eafc24
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker/vars/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_test_image_digest_v1: e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9
+docker_test_image_digest_v2: ee44b399df993016003bf5466bd3eeb221305e9d0fa831606bc7902d149c775b
+docker_test_image_digest_base: quay.io/ansible/docker-test-containers
+docker_test_image_hello_world: quay.io/ansible/docker-test-containers:hello-world
+docker_test_image_hello_world_base: quay.io/ansible/docker-test-containers
+docker_test_image_busybox: quay.io/ansible/docker-test-containers:busybox
+docker_test_image_alpine: quay.io/ansible/docker-test-containers:alpine3.8
+docker_test_image_alpine_different: quay.io/ansible/docker-test-containers:alpine3.7
+docker_test_image_registry_nginx: quay.io/ansible/docker-test-containers:nginx-alpine
+docker_test_image_registry: registry:2.6.1
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/defaults/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/defaults/main.yml
new file mode 100644
index 00000000..f701c90e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/defaults/main.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+skip_docker_compose: false
+docker_compose_packages:
+ - docker-compose
+docker_compose_pip_packages:
+ - docker-compose
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/meta/main.yml
new file mode 100644
index 00000000..b6e985d7
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_remote_constraints
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Alpine.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Alpine.yml
new file mode 100644
index 00000000..85042fdf
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Alpine.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ apk:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Archlinux.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Archlinux.yml
new file mode 100644
index 00000000..2e62ff05
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Archlinux.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ pacman:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Debian.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Debian.yml
new file mode 100644
index 00000000..1729ccab
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Debian.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ apt:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Fedora.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Fedora.yml
new file mode 100644
index 00000000..a5f3d467
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Fedora.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ dnf:
+ name: "{{ docker_compose_packages }}"
+ state: present
+ enablerepo: docker-ce-test
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-7.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-7.yml
new file mode 100644
index 00000000..62f0e373
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-7.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ yum:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-8.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-8.yml
new file mode 100644
index 00000000..54986845
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-8.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ dnf:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-9.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-9.yml
new file mode 100644
index 00000000..54986845
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/RedHat-9.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ dnf:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Suse.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Suse.yml
new file mode 100644
index 00000000..46e50fd4
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/Suse.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ community.general.zypper:
+ name: "{{ docker_compose_packages }}"
+ force: true
+ disable_gpg_check: true
+ update_cache: true
+ notify: cleanup docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/main.yml
new file mode 100644
index 00000000..63088510
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- set_fact:
+ has_docker_compose: false
+
+- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ include_tasks:
+ file: setup.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/setup.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/setup.yml
new file mode 100644
index 00000000..08c68a89
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/tasks/setup.yml
@@ -0,0 +1,59 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}-py{{ ansible_python.version.major }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}-py{{ ansible_python.version.major }}.yml"
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}-py{{ ansible_python.version.major }}.yml"
+ - "{{ ansible_facts.os_family }}-py{{ ansible_python.version.major }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - default.yml
+ paths:
+ - "{{ role_path }}/vars"
+
+- block:
+ - name: Include distribution specific tasks
+ include_tasks: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}-py{{ ansible_python.version.major }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}-py{{ ansible_python.version.major }}.yml"
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}-py{{ ansible_python.version.major }}.yml"
+ - "{{ ansible_facts.os_family }}-py{{ ansible_python.version.major }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ paths:
+ - "{{ role_path }}/tasks"
+
+ - name: Install docker-compose
+ pip:
+ state: present
+ name: "{{ docker_compose_pip_packages }}"
+ extra_args: "-c {{ remote_constraints }}"
+
+ - name: Register docker-compose version
+ command: "{{ ansible_python.executable }} -c 'import compose; print(compose.__version__)'"
+ register: docker_compose_version
+ ignore_errors: true
+
+ - name: Declare docker-compose version
+ set_fact:
+ docker_compose_version: "{{ docker_compose_version.stdout | default('0.0.0') }}"
+
+ - name: Declare docker-compose as existing
+ set_fact:
+ has_docker_compose: true
+
+ when: not skip_docker_compose
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/CentOS-8.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/CentOS-8.yml
new file mode 100644
index 00000000..c5d18002
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/CentOS-8.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+skip_docker_compose: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-7.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-7.yml
new file mode 100644
index 00000000..c5d18002
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-7.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+skip_docker_compose: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-8.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-8.yml
new file mode 100644
index 00000000..7279eac1
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-8.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_compose_packages: []
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-9.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-9.yml
new file mode 100644
index 00000000..7279eac1
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/RedHat-9.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_compose_packages: []
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Suse-py2.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Suse-py2.yml
new file mode 100644
index 00000000..c5d18002
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Suse-py2.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+skip_docker_compose: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Suse-py3.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Suse-py3.yml
new file mode 100644
index 00000000..46c58b25
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Suse-py3.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_compose_pip_packages: []
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu-16.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu-16.yml
new file mode 100644
index 00000000..c5d18002
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu-16.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+skip_docker_compose: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu-18.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu-18.yml
new file mode 100644
index 00000000..c5d18002
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu-18.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+skip_docker_compose: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu.yml
new file mode 100644
index 00000000..46c58b25
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/Ubuntu.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_compose_pip_packages: []
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/default.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/default.yml
new file mode 100644
index 00000000..f55df21f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose/vars/default.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/defaults/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/defaults/main.yml
new file mode 100644
index 00000000..4f84d3ac
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+skip_docker_compose: false
+docker_compose_packages:
+ - docker-compose-plugin
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/meta/main.yml
new file mode 100644
index 00000000..b6e985d7
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_docker
+ - setup_remote_constraints
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Alpine.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Alpine.yml
new file mode 100644
index 00000000..85042fdf
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Alpine.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ apk:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Archlinux.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Archlinux.yml
new file mode 100644
index 00000000..2e62ff05
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Archlinux.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ pacman:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Debian.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Debian.yml
new file mode 100644
index 00000000..1729ccab
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Debian.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ apt:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Fedora.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Fedora.yml
new file mode 100644
index 00000000..a5f3d467
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Fedora.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ dnf:
+ name: "{{ docker_compose_packages }}"
+ state: present
+ enablerepo: docker-ce-test
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-7.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-7.yml
new file mode 100644
index 00000000..62f0e373
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-7.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ yum:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-8.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-8.yml
new file mode 100644
index 00000000..54986845
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-8.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ dnf:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-9.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-9.yml
new file mode 100644
index 00000000..54986845
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/RedHat-9.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ dnf:
+ name: "{{ docker_compose_packages }}"
+ state: present
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Suse.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Suse.yml
new file mode 100644
index 00000000..46e50fd4
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/Suse.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install docker-compose as system package
+ community.general.zypper:
+ name: "{{ docker_compose_packages }}"
+ force: true
+ disable_gpg_check: true
+ update_cache: true
+ notify: cleanup docker
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/main.yml
new file mode 100644
index 00000000..e379f0fb
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/main.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- set_fact:
+ has_docker_compose: false
+
+- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6'] and ansible_python_version is version('3.7', '>=')
+ include_tasks:
+ file: setup.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/setup.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/setup.yml
new file mode 100644
index 00000000..3da96a33
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/tasks/setup.yml
@@ -0,0 +1,50 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Include distribution specific variables
+ include_vars: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ - default.yml
+ paths:
+ - "{{ role_path }}/vars"
+
+- block:
+ - name: Include distribution specific tasks
+ include_tasks: "{{ lookup('first_found', params) }}"
+ vars:
+ params:
+ files:
+ - "{{ ansible_facts.distribution }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.os_family }}-{{ ansible_facts.distribution_major_version }}.yml"
+ - "{{ ansible_facts.distribution }}.yml"
+ - "{{ ansible_facts.os_family }}.yml"
+ paths:
+ - "{{ role_path }}/tasks"
+
+ - name: Install Python on Whales
+ pip:
+ state: present
+ name: python-on-whales
+ extra_args: "-c {{ remote_constraints }}"
+
+ - name: Register docker-compose version
+ command: "docker compose version --short"
+ register: docker_compose_version
+
+ - name: Declare docker-compose version
+ set_fact:
+ docker_compose_version: "{{ docker_compose_version.stdout }}"
+
+ - name: Declare docker-compose as existing
+ set_fact:
+ has_docker_compose: '{{ docker_compose_version is version("2.0", ">=") }}'
+
+ when: not skip_docker_compose
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Alpine.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Alpine.yml
new file mode 100644
index 00000000..c5d18002
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Alpine.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+skip_docker_compose: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Archlinux.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Archlinux.yml
new file mode 100644
index 00000000..99d81828
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Archlinux.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_compose_packages:
+ - docker-compose
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Fedora.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Fedora.yml
new file mode 100644
index 00000000..c5d18002
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/Fedora.yml
@@ -0,0 +1,6 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+skip_docker_compose: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/default.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/default.yml
new file mode 100644
index 00000000..f55df21f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_compose_v2/vars/default.yml
@@ -0,0 +1,4 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/aliases b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/aliases
new file mode 100644
index 00000000..357972ff
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/aliases
@@ -0,0 +1,6 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+needs/target/setup_docker
+needs/target/setup_openssl
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.conf b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.conf
new file mode 100644
index 00000000..c3b0d334
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.conf
@@ -0,0 +1,50 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+events {
+ worker_connections 16;
+}
+
+http {
+ include /etc/nginx/mime.types;
+ default_type application/octet-stream;
+
+ error_log /dev/stdout info;
+ access_log /dev/stdout;
+
+ server {
+ listen *:5000 ssl;
+ server_name test-registry.ansible.com;
+ server_name_in_redirect on;
+
+ ssl_protocols TLSv1.2;
+ ssl_ciphers 'ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384:DHE-DSS-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-RSA-AES128-SHA256';
+ ssl_ecdh_curve X25519:secp521r1:secp384r1;
+ ssl_prefer_server_ciphers on;
+ ssl_certificate /etc/nginx/cert.pem;
+ ssl_certificate_key /etc/nginx/cert.key;
+
+ location / {
+ return 401;
+ }
+
+ location /v2/ {
+ proxy_pass http://real-registry:5000;
+ proxy_http_version 1.1;
+ proxy_set_header Upgrade $http_upgrade;
+ proxy_set_header Connection "upgrade";
+ proxy_set_header Host $http_host;
+ proxy_set_header X-Forwarded-Proto $scheme;
+ proxy_set_header X-Forwarded-For $remote_addr;
+ proxy_set_header X-Forwarded-Port $server_port;
+ proxy_set_header X-Request-Start $msec;
+
+ client_max_body_size 0;
+ chunked_transfer_encoding on;
+
+ auth_basic "Ansible Test Docker Registry";
+ auth_basic_user_file /etc/nginx/nginx.htpasswd;
+ }
+ }
+}
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.htpasswd b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.htpasswd
new file mode 100644
index 00000000..e3ff626f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/files/nginx.htpasswd
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+testuser:{PLAIN}hunter2
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/cleanup.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/cleanup.yml
new file mode 100644
index 00000000..0a1f363c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/cleanup.yml
@@ -0,0 +1,59 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: "Make sure all images are removed"
+ docker_image:
+ name: "{{ item }}"
+ state: absent
+ with_items: "{{ docker_registry_setup_inames }}"
+
+- name: "Get registry logs"
+ command: "docker logs {{ docker_registry_container_name_registry }}"
+ register: registry_logs
+ no_log: true
+ ignore_errors: true
+
+- name: "Printing registry logs"
+ debug:
+ var: registry_logs.stdout_lines
+ when: registry_logs is not failed
+
+- name: "Get nginx logs for first instance"
+ command: "docker logs {{ docker_registry_container_name_nginx }}"
+ register: nginx_logs
+ no_log: true
+ ignore_errors: true
+
+- name: "Get nginx logs for second instance"
+ command: "docker logs {{ docker_registry_container_name_nginx2 }}"
+ register: nginx2_logs
+ no_log: true
+ ignore_errors: true
+
+- name: "Printing nginx logs for first instance"
+ debug:
+ var: nginx_logs.stdout_lines
+ when: nginx_logs is not failed
+
+- name: "Printing nginx logs for second instance"
+ debug:
+ var: nginx2_logs.stdout_lines
+  when: nginx2_logs is not failed
+
+- name: "Make sure all containers are removed"
+ docker_container:
+ name: "{{ item }}"
+ state: absent
+ force_kill: true
+ with_items: "{{ docker_registry_setup_cnames }}"
+ register: result
+ retries: 3
+ delay: 3
+ until: result is success
+
+- name: "Make sure all volumes are removed"
+ command: "docker volume rm -f {{ item }}"
+ with_items: "{{ docker_registry_setup_vnames }}"
+ ignore_errors: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/main.yml
new file mode 100644
index 00000000..f1bdaace
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/handlers/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Remove test registry
+ include_tasks: ../handlers/cleanup.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/meta/main.yml
new file mode 100644
index 00000000..4ab14ed1
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/meta/main.yml
@@ -0,0 +1,9 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ #- setup_docker -- done in setup.yml, to work around cleanup problems!
+ - setup_openssl
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/main.yml
new file mode 100644
index 00000000..55c65477
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/main.yml
@@ -0,0 +1,13 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- when: ansible_facts.distribution ~ ansible_facts.distribution_major_version not in ['CentOS6', 'RedHat6']
+ include_tasks:
+ file: setup.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml
new file mode 100644
index 00000000..25bb2029
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup-frontend.yml
@@ -0,0 +1,120 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Set up first nginx frontend for registry
+- name: Create volume for nginx frontend for registry
+ docker_volume:
+ name: '{{ docker_registry_container_name_frontend }}'
+ state: present
+
+- name: Create container for nginx frontend for registry
+ docker_container:
+ state: stopped
+ name: '{{ docker_registry_container_name_frontend }}'
+ image: "{{ docker_test_image_registry_nginx }}"
+ ports: 5000
+ # `links` does not work when using a network. That's why the docker_container task
+ # in setup.yml specifies `aliases` so we get the same effect.
+ links:
+ - '{{ docker_registry_container_name_registry }}:real-registry'
+ volumes:
+ - '{{ docker_registry_container_name_frontend }}:/etc/nginx/'
+ network_mode: '{{ current_container_network_ip | default(omit, true) }}'
+ networks: >-
+ {{
+ [dict([['name', current_container_network_ip]])]
+ if current_container_network_ip not in ['', 'bridge'] else omit
+ }}
+ register: nginx_container
+
+- name: Copy config files
+ copy:
+ src: "{{ item }}"
+ dest: "{{ remote_tmp_dir }}/{{ item }}"
+ mode: "0644"
+ loop:
+ - nginx.conf
+ - nginx.htpasswd
+
+- name: Copy static files into volume
+ docker_container_copy_into:
+ container: '{{ docker_registry_container_name_frontend }}'
+ path: '{{ remote_tmp_dir }}/{{ item }}'
+ container_path: '/etc/nginx/{{ item }}'
+ owner_id: 0
+ group_id: 0
+ loop:
+ - nginx.conf
+ - nginx.htpasswd
+ register: can_copy_files
+ ignore_errors: true
+
+- when: can_copy_files is not failed
+ block:
+
+ - name: Create private key for frontend certificate
+ community.crypto.openssl_privatekey:
+ path: '{{ remote_tmp_dir }}/cert.key'
+ type: ECC
+ curve: secp256r1
+ force: true
+
+ - name: Create CSR for frontend certificate
+ community.crypto.openssl_csr:
+ path: '{{ remote_tmp_dir }}/cert.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/cert.key'
+ subject_alt_name:
+ - DNS:test-registry.ansible.com
+
+ - name: Create frontend certificate
+ community.crypto.x509_certificate:
+ path: '{{ remote_tmp_dir }}/cert.pem'
+ csr_path: '{{ remote_tmp_dir }}/cert.csr'
+ privatekey_path: '{{ remote_tmp_dir }}/cert.key'
+ provider: selfsigned
+
+ - name: Copy dynamic files into volume
+ docker_container_copy_into:
+ container: '{{ docker_registry_container_name_frontend }}'
+ path: '{{ remote_tmp_dir }}/{{ item }}'
+ container_path: '/etc/nginx/{{ item }}'
+ owner_id: 0
+ group_id: 0
+ loop:
+ - cert.pem
+ - cert.key
+
+ - name: Start nginx frontend for registry
+ docker_container:
+ name: '{{ docker_registry_container_name_frontend }}'
+ state: started
+ register: nginx_container
+
+ - name: Output nginx container network settings
+ debug:
+ var: nginx_container.container.NetworkSettings
+
+ - name: Get registry URL
+ set_fact:
+ # Note that this host/port combination is used by the Docker daemon, that's why `localhost` is appropriate!
+ # This host/port combination cannot be used if the tests are running inside a docker container.
+ docker_registry_frontend_address: localhost:{{ nginx_container.container.NetworkSettings.Ports['5000/tcp'].0.HostPort }}
+ # The following host/port combination can be used from inside the docker container.
+ docker_registry_frontend_address_internal: "{{ nginx_container.container.NetworkSettings.Networks[current_container_network_ip].IPAddress if current_container_network_ip else nginx_container.container.NetworkSettings.IPAddress }}:5000"
+
+ - name: Wait for registry frontend
+ uri:
+ url: https://{{ docker_registry_frontend_address_internal }}/v2/
+ url_username: testuser
+ url_password: hunter2
+ validate_certs: false
+ register: result
+ until: result is success
+ retries: 5
+ delay: 1
+
+- set_fact:
+ docker_registry_frontend_address: 'n/a'
+ when: can_copy_files is failed
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup.yml
new file mode 100644
index 00000000..b3a8662e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/tasks/setup.yml
@@ -0,0 +1,84 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Register registry cleanup
+ # This must be done **before** docker is set up (see next task), to ensure that the
+ # registry is removed **before** docker itself is removed. This is necessary as the
+ # registry and its frontends run as docker containers.
+ command: 'true'
+ notify: Remove test registry
+
+- name: Setup Docker
+ # Please note that we do setup_docker here and not via meta/main.yml to avoid the problem that
+ # our cleanup is called **after** setup_docker's cleanup has been called!
+ include_role:
+ name: setup_docker
+
+- name: Create random name prefix and test registry name
+ set_fact:
+ docker_registry_container_name_registry: '{{ ''ansible-docker-test-registry-%0x'' % ((2**32) | random) }}'
+ docker_registry_container_name_nginx: '{{ ''ansible-docker-test-registry-frontend-%0x'' % ((2**32) | random) }}'
+ docker_registry_container_name_nginx2: '{{ ''ansible-docker-test-registry-frontend2-%0x'' % ((2**32) | random) }}'
+
+- name: Create image and container list
+ set_fact:
+ docker_registry_setup_inames: []
+ docker_registry_setup_cnames:
+ - '{{ docker_registry_container_name_registry }}'
+ - '{{ docker_registry_container_name_nginx }}'
+ - '{{ docker_registry_container_name_nginx2 }}'
+ docker_registry_setup_vnames:
+ - '{{ docker_registry_container_name_nginx }}'
+ - '{{ docker_registry_container_name_nginx2 }}'
+
+- debug:
+ msg: Using test registry name {{ docker_registry_container_name_registry }} and nginx frontend names {{ docker_registry_container_name_nginx }} and {{ docker_registry_container_name_nginx2 }}
+
+- fail: msg="Too old docker / docker-py version to set up docker registry!"
+ when: not(docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.25', '>=')) and (ansible_distribution != 'CentOS' or ansible_distribution_major_version|int > 6)
+
+- when: docker_py_version is version('1.8.0', '>=') and docker_api_version is version('1.25', '>=')
+ block:
+
+ # Set up registry container
+ - name: Start test registry
+ docker_container:
+ name: '{{ docker_registry_container_name_registry }}'
+ image: "{{ docker_test_image_registry }}"
+ ports: 5000
+ network_mode: '{{ current_container_network_ip | default(omit, true) }}'
+ # We need to define the alias `real-registry` here because the global `links`
+ # option for the NGINX containers (see setup-frontend.yml) does not work when
+ # using networks.
+ networks: >-
+ {{
+ [dict([['name', current_container_network_ip], ['aliases', ['real-registry']]])]
+ if current_container_network_ip not in ['', 'bridge'] else omit
+ }}
+ register: registry_container
+
+ - name: Get registry URL
+ set_fact:
+ registry_address: localhost:{{ registry_container.container.NetworkSettings.Ports['5000/tcp'].0.HostPort }}
+
+ # Set up first nginx frontend for registry
+ - include_tasks: setup-frontend.yml
+ vars:
+ docker_registry_container_name_frontend: '{{ docker_registry_container_name_nginx }}'
+
+ - set_fact:
+ registry_frontend_address: '{{ docker_registry_frontend_address }}'
+
+ # Set up second nginx frontend for registry
+ - include_tasks: setup-frontend.yml
+ vars:
+ docker_registry_container_name_frontend: '{{ docker_registry_container_name_nginx2 }}'
+
+ - set_fact:
+ registry_frontend2_address: '{{ docker_registry_frontend_address }}'
+
+ # Print addresses for registry and frontends
+ - debug:
+ msg: "Registry available under {{ registry_address }}, NGINX frontends available under {{ registry_frontend_address }} and {{ registry_frontend2_address }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/vars/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/vars/main.yml
new file mode 100644
index 00000000..e4eafc24
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_docker_registry/vars/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+docker_test_image_digest_v1: e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9
+docker_test_image_digest_v2: ee44b399df993016003bf5466bd3eeb221305e9d0fa831606bc7902d149c775b
+docker_test_image_digest_base: quay.io/ansible/docker-test-containers
+docker_test_image_hello_world: quay.io/ansible/docker-test-containers:hello-world
+docker_test_image_hello_world_base: quay.io/ansible/docker-test-containers
+docker_test_image_busybox: quay.io/ansible/docker-test-containers:busybox
+docker_test_image_alpine: quay.io/ansible/docker-test-containers:alpine3.8
+docker_test_image_alpine_different: quay.io/ansible/docker-test-containers:alpine3.7
+docker_test_image_registry_nginx: quay.io/ansible/docker-test-containers:nginx-alpine
+docker_test_image_registry: registry:2.6.1
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_epel/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_epel/tasks/main.yml
new file mode 100644
index 00000000..9aa12db8
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_epel/tasks/main.yml
@@ -0,0 +1,15 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Install EPEL
+ yum:
+ name: https://s3.amazonaws.com/ansible-ci-files/test/integration/targets/setup_epel/epel-release-latest-{{ ansible_distribution_major_version }}.noarch.rpm
+ disable_gpg_check: true
+ when: ansible_facts.distribution in ['RedHat', 'CentOS']
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_openssl/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/meta/main.yml
new file mode 100644
index 00000000..d4a5c7d0
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_constraints
+ - setup_pkg_mgr
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_openssl/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/tasks/main.yml
new file mode 100644
index 00000000..b84159b7
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/tasks/main.yml
@@ -0,0 +1,35 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: Include OS-specific variables
+ include_vars: '{{ ansible_os_family }}.yml'
+ when: not ansible_os_family == "Darwin"
+
+- name: Install cryptography (Python 3)
+ become: true
+ package:
+ name: '{{ cryptography_package_name_python3 }}'
+ when: ansible_os_family != 'Darwin' and ansible_python_version is version('3.0', '>=')
+
+- name: Install cryptography (Python 2)
+ become: true
+ package:
+ name: '{{ cryptography_package_name }}'
+ when: ansible_os_family != 'Darwin' and ansible_python_version is version('3.0', '<')
+
+- name: Install cryptography (Darwin, and potentially upgrade for other OSes)
+ become: true
+ pip:
+ name: cryptography>=1.3.0
+ extra_args: "-c {{ remote_constraints }}"
+
+- name: Register cryptography version
+ command: "{{ ansible_python.executable }} -c 'import cryptography; print(cryptography.__version__)'"
+ register: cryptography_version
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Alpine.yml b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Alpine.yml
new file mode 100644
index 00000000..46007479
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Alpine.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: py-cryptography
+cryptography_package_name_python3: py3-cryptography
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Archlinux.yml b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Archlinux.yml
new file mode 100644
index 00000000..9fa799eb
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Archlinux.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python-cryptography
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Debian.yml b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Debian.yml
new file mode 100644
index 00000000..4a3dc629
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Debian.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/FreeBSD.yml b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/FreeBSD.yml
new file mode 100644
index 00000000..0bf73ee8
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/FreeBSD.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: py27-cryptography
+cryptography_package_name_python3: py36-cryptography
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/RedHat.yml b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/RedHat.yml
new file mode 100644
index 00000000..4a3dc629
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/RedHat.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Suse.yml b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Suse.yml
new file mode 100644
index 00000000..4a3dc629
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_openssl/vars/Suse.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+cryptography_package_name: python-cryptography
+cryptography_package_name_python3: python3-cryptography
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_paramiko/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_paramiko/meta/main.yml
new file mode 100644
index 00000000..7172cbe2
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_paramiko/meta/main.yml
@@ -0,0 +1,8 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_constraints
+ - setup_openssl # so cryptography is installed
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_paramiko/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_paramiko/tasks/main.yml
new file mode 100644
index 00000000..fc29334c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_paramiko/tasks/main.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: Install paramiko
+ pip:
+ name: "paramiko{% if cryptography_version.stdout is version('2.5.0', '<') %}<2.5.0{% endif %}"
+ extra_args: "-c {{ remote_constraints }}"
+ become: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_pkg_mgr/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_pkg_mgr/tasks/main.yml
new file mode 100644
index 00000000..bc74b251
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_pkg_mgr/tasks/main.yml
@@ -0,0 +1,28 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- set_fact:
+ pkg_mgr: community.general.pkgng
+ ansible_pkg_mgr: community.general.pkgng
+ cacheable: true
+ when: ansible_os_family == "FreeBSD"
+
+- set_fact:
+ pkg_mgr: community.general.zypper
+ ansible_pkg_mgr: community.general.zypper
+ cacheable: true
+ when: ansible_os_family == "Suse"
+
+- shell:
+ cmd: |
+ sed -i 's/mirrorlist/#mirrorlist/g' /etc/yum.repos.d/CentOS-Linux-*.repo
+ sed -i 's%#baseurl=http://mirror.centos.org/$contentdir/$releasever/%baseurl=https://vault.centos.org/8.4.2105/%g' /etc/yum.repos.d/CentOS-Linux-*.repo
+ ignore_errors: true # This fails for CentOS Stream 8
+  when: ansible_distribution == 'CentOS' and ansible_distribution_major_version == '8'
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/aliases b/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/aliases
new file mode 100644
index 00000000..27ce6b08
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/aliases
@@ -0,0 +1,5 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+needs/file/tests/utils/constraints.txt
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/meta/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/meta/main.yml
new file mode 100644
index 00000000..982de6eb
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/meta/main.yml
@@ -0,0 +1,7 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+dependencies:
+ - setup_remote_tmp_dir
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/tasks/main.yml
new file mode 100644
index 00000000..7e913fc9
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_remote_constraints/tasks/main.yml
@@ -0,0 +1,18 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: record constraints.txt path on remote host
+ set_fact:
+ remote_constraints: "{{ remote_tmp_dir }}/constraints.txt"
+
+- name: copy constraints.txt to remote host
+ copy:
+ src: "{{ role_path }}/../../../utils/constraints.txt"
+ dest: "{{ remote_constraints }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
new file mode 100644
index 00000000..f1c55b04
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/handlers/main.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: delete temporary directory
+ include_tasks: default-cleanup.yml
+
+- name: delete temporary directory (windows)
+ include_tasks: windows-cleanup.yml
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml b/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
new file mode 100644
index 00000000..cc74b70a
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default-cleanup.yml
@@ -0,0 +1,10 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: delete temporary directory
+ file:
+ path: "{{ remote_tmp_dir }}"
+ state: absent
+ no_log: true
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml b/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
new file mode 100644
index 00000000..c9d871c6
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/default.yml
@@ -0,0 +1,16 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+- name: create temporary directory
+ tempfile:
+ state: directory
+ suffix: .test
+ register: remote_tmp_dir
+ notify:
+ - delete temporary directory
+
+- name: record temporary directory
+ set_fact:
+ remote_tmp_dir: "{{ remote_tmp_dir.path }}"
diff --git a/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml b/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
new file mode 100644
index 00000000..babbdad0
--- /dev/null
+++ b/ansible_collections/community/docker/tests/integration/targets/setup_remote_tmp_dir/tasks/main.yml
@@ -0,0 +1,20 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+####################################################################
+# WARNING: These are designed specifically for Ansible tests #
+# and should not be used as examples of how to write Ansible roles #
+####################################################################
+
+- name: make sure we have the ansible_os_family and ansible_distribution_version facts
+ setup:
+ gather_subset: distribution
+ when: ansible_facts == {}
+
+- include_tasks: "{{ lookup('first_found', files)}}"
+ vars:
+ files:
+ - "{{ ansible_os_family | lower }}.yml"
+ - "default.yml"
diff --git a/ansible_collections/community/docker/tests/requirements.yml b/ansible_collections/community/docker/tests/requirements.yml
new file mode 100644
index 00000000..fa0ba167
--- /dev/null
+++ b/ansible_collections/community/docker/tests/requirements.yml
@@ -0,0 +1,12 @@
+---
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+integration_tests_dependencies:
+- ansible.posix
+- community.internal_test_tools
+- community.crypto
+- community.general
+unit_tests_dependencies:
+- community.internal_test_tools
diff --git a/ansible_collections/community/docker/tests/sanity/extra/extra-docs.json b/ansible_collections/community/docker/tests/sanity/extra/extra-docs.json
new file mode 100644
index 00000000..c2e612e5
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/extra/extra-docs.json
@@ -0,0 +1,10 @@
+{
+ "include_symlinks": false,
+ "prefixes": [
+ "docs/docsite/"
+ ],
+ "output": "path-line-column-message",
+ "requirements": [
+ "antsibull-docs"
+ ]
+}
diff --git a/ansible_collections/community/docker/tests/sanity/extra/extra-docs.json.license b/ansible_collections/community/docker/tests/sanity/extra/extra-docs.json.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/extra/extra-docs.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/tests/sanity/extra/extra-docs.py b/ansible_collections/community/docker/tests/sanity/extra/extra-docs.py
new file mode 100755
index 00000000..67310492
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/extra/extra-docs.py
@@ -0,0 +1,24 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+"""Check extra collection docs with antsibull-docs."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+import subprocess
+
+
+def main():
+ """Main entry point."""
+ if not os.path.isdir(os.path.join('docs', 'docsite')):
+ return
+ p = subprocess.run(['antsibull-docs', 'lint-collection-docs', '.'], check=False)
+ if p.returncode not in (0, 3):
+ print('{0}:0:0: unexpected return code {1}'.format(sys.argv[0], p.returncode))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/tests/sanity/extra/licenses.json b/ansible_collections/community/docker/tests/sanity/extra/licenses.json
new file mode 100644
index 00000000..50e47ca8
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/extra/licenses.json
@@ -0,0 +1,4 @@
+{
+ "include_symlinks": false,
+ "output": "path-message"
+}
diff --git a/ansible_collections/community/docker/tests/sanity/extra/licenses.json.license b/ansible_collections/community/docker/tests/sanity/extra/licenses.json.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/extra/licenses.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/tests/sanity/extra/licenses.py b/ansible_collections/community/docker/tests/sanity/extra/licenses.py
new file mode 100755
index 00000000..80eb795e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/extra/licenses.py
@@ -0,0 +1,110 @@
+#!/usr/bin/env python
+# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+"""Prevent files without a correct license identifier from being added to the source tree."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import glob
+import sys
+
+
+def format_license_list(licenses):
+ if not licenses:
+ return '(empty)'
+ return ', '.join(['"%s"' % license for license in licenses])
+
+
+def find_licenses(filename, relax=False):
+ spdx_license_identifiers = []
+ other_license_identifiers = []
+ has_copyright = False
+ try:
+ with open(filename, 'r', encoding='utf-8') as f:
+ for line in f:
+ line = line.rstrip()
+ if 'Copyright ' in line:
+ has_copyright = True
+ if 'Copyright: ' in line:
+ print('%s: found copyright line with "Copyright:". Please remove the colon.' % (filename, ))
+ if 'SPDX-FileCopyrightText: ' in line:
+ has_copyright = True
+ idx = line.find('SPDX-License-Identifier: ')
+ if idx >= 0:
+ lic_id = line[idx + len('SPDX-License-Identifier: '):]
+ spdx_license_identifiers.extend(lic_id.split(' OR '))
+ if 'GNU General Public License' in line:
+ if 'v3.0+' in line:
+ other_license_identifiers.append('GPL-3.0-or-later')
+ if 'version 3 or later' in line:
+ other_license_identifiers.append('GPL-3.0-or-later')
+ if 'Simplified BSD License' in line:
+ other_license_identifiers.append('BSD-2-Clause')
+ if 'Apache License 2.0' in line:
+ other_license_identifiers.append('Apache-2.0')
+ if 'PSF License' in line or 'Python-2.0' in line:
+ other_license_identifiers.append('PSF-2.0')
+ if 'MIT License' in line:
+ other_license_identifiers.append('MIT')
+ except Exception as exc:
+ print('%s: error while processing file: %s' % (filename, exc))
+ if len(set(spdx_license_identifiers)) < len(spdx_license_identifiers):
+ print('%s: found identical SPDX-License-Identifier values' % (filename, ))
+ if other_license_identifiers and set(other_license_identifiers) != set(spdx_license_identifiers):
+ print('%s: SPDX-License-Identifier yielded the license list %s, while manual guessing yielded the license list %s' % (
+ filename, format_license_list(spdx_license_identifiers), format_license_list(other_license_identifiers)))
+ if not has_copyright and not relax:
+ print('%s: found no copyright notice' % (filename, ))
+ return sorted(spdx_license_identifiers)
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ # The following paths are allowed to have no license identifier
+ no_comments_allowed = [
+ 'changelogs/fragments/*.yml',
+ 'changelogs/fragments/*.yaml',
+ ]
+
+ # These files are completely ignored
+ ignore_paths = [
+ '.ansible-test-timeout.json',
+ '.reuse/dep5',
+ 'LICENSES/*.txt',
+ 'COPYING',
+ ]
+
+ no_comments_allowed = [fn for pattern in no_comments_allowed for fn in glob.glob(pattern)]
+ ignore_paths = [fn for pattern in ignore_paths for fn in glob.glob(pattern)]
+
+ valid_licenses = [license_file[len('LICENSES/'):-len('.txt')] for license_file in glob.glob('LICENSES/*.txt')]
+
+ for path in paths:
+ if path.startswith('./'):
+ path = path[2:]
+ if path in ignore_paths or path.startswith('tests/output/'):
+ continue
+ if os.stat(path).st_size == 0:
+ continue
+ if not path.endswith('.license') and os.path.exists(path + '.license'):
+ path = path + '.license'
+ valid_licenses_for_path = valid_licenses
+ if path.startswith('plugins/') and not path.startswith(('plugins/modules/', 'plugins/module_utils/')):
+ valid_licenses_for_path = [license for license in valid_licenses if license == 'GPL-3.0-or-later']
+ licenses = find_licenses(path, relax=path in no_comments_allowed)
+ if not licenses:
+ if path not in no_comments_allowed:
+ print('%s: must have at least one license' % (path, ))
+ else:
+ for license in licenses:
+ if license not in valid_licenses_for_path:
+ print('%s: found not allowed license "%s", must be one of %s' % (
+ path, license, format_license_list(valid_licenses_for_path)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/tests/sanity/extra/licenses.py.license b/ansible_collections/community/docker/tests/sanity/extra/licenses.py.license
new file mode 100644
index 00000000..6c4958fe
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/extra/licenses.py.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: 2022, Felix Fontein <felix@fontein.de>
diff --git a/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.json b/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.json
new file mode 100644
index 00000000..c789a7fd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.json
@@ -0,0 +1,7 @@
+{
+ "include_symlinks": true,
+ "prefixes": [
+ "plugins/"
+ ],
+ "output": "path-message"
+}
diff --git a/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.json.license b/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.json.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.json.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.py b/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.py
new file mode 100755
index 00000000..51444ab7
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/extra/no-unwanted-files.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+"""Prevent unwanted files from being added to the source tree."""
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+
+def main():
+ """Main entry point."""
+ paths = sys.argv[1:] or sys.stdin.read().splitlines()
+
+ allowed_extensions = (
+ '.cs',
+ '.ps1',
+ '.psm1',
+ '.py',
+ )
+
+ skip_paths = set([
+ ])
+
+ skip_directories = (
+ )
+
+ for path in paths:
+ if path in skip_paths:
+ continue
+
+ if any(path.startswith(skip_directory) for skip_directory in skip_directories):
+ continue
+
+ ext = os.path.splitext(path)[1]
+
+ if ext not in allowed_extensions:
+ print('%s: extension must be one of: %s' % (path, ', '.join(allowed_extensions)))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.10.txt b/ansible_collections/community/docker/tests/sanity/ignore-2.10.txt
new file mode 100644
index 00000000..1a9f4888
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.10.txt
@@ -0,0 +1,11 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+.azure-pipelines/scripts/publish-codecov.py compile-2.6!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py compile-2.7!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py compile-3.5!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py future-import-boilerplate
+.azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate
+plugins/modules/current_container_facts.py validate-modules:return-syntax-error
+plugins/module_utils/module_container/module.py compile-2.6!skip # Uses Python 2.7+ syntax
+plugins/module_utils/module_container/module.py import-2.6!skip # Uses Python 2.7+ syntax
+plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax
+plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.10.txt.license b/ansible_collections/community/docker/tests/sanity/ignore-2.10.txt.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.10.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.11.txt b/ansible_collections/community/docker/tests/sanity/ignore-2.11.txt
new file mode 100644
index 00000000..1a9f4888
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.11.txt
@@ -0,0 +1,11 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+.azure-pipelines/scripts/publish-codecov.py compile-2.6!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py compile-2.7!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py compile-3.5!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py future-import-boilerplate
+.azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate
+plugins/modules/current_container_facts.py validate-modules:return-syntax-error
+plugins/module_utils/module_container/module.py compile-2.6!skip # Uses Python 2.7+ syntax
+plugins/module_utils/module_container/module.py import-2.6!skip # Uses Python 2.7+ syntax
+plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax
+plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.11.txt.license b/ansible_collections/community/docker/tests/sanity/ignore-2.11.txt.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.11.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.12.txt b/ansible_collections/community/docker/tests/sanity/ignore-2.12.txt
new file mode 100644
index 00000000..3d71834d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.12.txt
@@ -0,0 +1,3 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+plugins/modules/current_container_facts.py validate-modules:return-syntax-error
+plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.12.txt.license b/ansible_collections/community/docker/tests/sanity/ignore-2.12.txt.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.12.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.13.txt b/ansible_collections/community/docker/tests/sanity/ignore-2.13.txt
new file mode 100644
index 00000000..2a06013d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.13.txt
@@ -0,0 +1,2 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.13.txt.license b/ansible_collections/community/docker/tests/sanity/ignore-2.13.txt.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.13.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.14.txt b/ansible_collections/community/docker/tests/sanity/ignore-2.14.txt
new file mode 100644
index 00000000..2a06013d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.14.txt
@@ -0,0 +1,2 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.14.txt.license b/ansible_collections/community/docker/tests/sanity/ignore-2.14.txt.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.14.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.15.txt b/ansible_collections/community/docker/tests/sanity/ignore-2.15.txt
new file mode 100644
index 00000000..2a06013d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.15.txt
@@ -0,0 +1,2 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.15.txt.license b/ansible_collections/community/docker/tests/sanity/ignore-2.15.txt.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.15.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.9.txt b/ansible_collections/community/docker/tests/sanity/ignore-2.9.txt
new file mode 100644
index 00000000..81b68cbd
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.9.txt
@@ -0,0 +1,10 @@
+.azure-pipelines/scripts/publish-codecov.py replace-urlopen
+.azure-pipelines/scripts/publish-codecov.py compile-2.6!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py compile-2.7!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py compile-3.5!skip # Uses Python 3.6+ syntax
+.azure-pipelines/scripts/publish-codecov.py future-import-boilerplate
+.azure-pipelines/scripts/publish-codecov.py metaclass-boilerplate
+plugins/module_utils/module_container/module.py compile-2.6!skip # Uses Python 2.7+ syntax
+plugins/module_utils/module_container/module.py import-2.6!skip # Uses Python 2.7+ syntax
+plugins/modules/docker_container.py import-2.6!skip # Import uses Python 2.7+ syntax
+plugins/modules/docker_container_copy_into.py validate-modules:undocumented-parameter # _max_file_size_for_diff is used by the action plugin
diff --git a/ansible_collections/community/docker/tests/sanity/ignore-2.9.txt.license b/ansible_collections/community/docker/tests/sanity/ignore-2.9.txt.license
new file mode 100644
index 00000000..edff8c76
--- /dev/null
+++ b/ansible_collections/community/docker/tests/sanity/ignore-2.9.txt.license
@@ -0,0 +1,3 @@
+GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+SPDX-License-Identifier: GPL-3.0-or-later
+SPDX-FileCopyrightText: Ansible Project
diff --git a/ansible_collections/community/docker/tests/unit/compat/__init__.py b/ansible_collections/community/docker/tests/unit/compat/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/compat/__init__.py
diff --git a/ansible_collections/community/docker/tests/unit/compat/builtins.py b/ansible_collections/community/docker/tests/unit/compat/builtins.py
new file mode 100644
index 00000000..d548601d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/compat/builtins.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+#
+# Compat for python2.7
+#
+
+# One unittest needs to import builtins via __import__() so we need to have
+# the string that represents it
+try:
+ import __builtin__ # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ BUILTINS = 'builtins'
+else:
+ BUILTINS = '__builtin__'
diff --git a/ansible_collections/community/docker/tests/unit/compat/mock.py b/ansible_collections/community/docker/tests/unit/compat/mock.py
new file mode 100644
index 00000000..6ef80a7c
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/compat/mock.py
@@ -0,0 +1,30 @@
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python3.x's unittest.mock module
+'''
+import sys # noqa: F401, pylint: disable=unused-import
+
+# Python 2.7
+
+# Note: Could use the pypi mock library on python3.x as well as python2.x. It
+# is the same as the python3 stdlib mock library
+
+try:
+ # Allow wildcard import because we really do want to import all of mock's
+ # symbols into this compat shim
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ from unittest.mock import * # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ # Python 2
+ # pylint: disable=wildcard-import,unused-wildcard-import
+ try:
+ from mock import * # noqa: F401, pylint: disable=unused-import
+ except ImportError:
+ print('You need the mock library installed on python2.x to run tests')
diff --git a/ansible_collections/community/docker/tests/unit/compat/unittest.py b/ansible_collections/community/docker/tests/unit/compat/unittest.py
new file mode 100644
index 00000000..1872e583
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/compat/unittest.py
@@ -0,0 +1,25 @@
+# Copyright (c) 2014, Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+'''
+Compat module for Python2.7's unittest module
+'''
+
+import sys
+
+# Allow wildcard import because we really do want to import all of
+# unittests's symbols into this compat shim
+# pylint: disable=wildcard-import,unused-wildcard-import
+if sys.version_info < (2, 7):
+ try:
+ # Need unittest2 on python2.6
+ from unittest2 import *
+ except ImportError:
+ print('You need unittest2 installed on python2.6.x to run tests')
+else:
+ from unittest import *
diff --git a/ansible_collections/community/docker/tests/unit/plugins/connection/test_docker.py b/ansible_collections/community/docker/tests/unit/plugins/connection/test_docker.py
new file mode 100644
index 00000000..5ae6a8e1
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/connection/test_docker.py
@@ -0,0 +1,57 @@
+# Copyright (c) 2020 Red Hat, Inc.
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+# Make coding more python3-ish
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from io import StringIO
+
+from ansible_collections.community.docker.tests.unit.compat import mock
+from ansible_collections.community.docker.tests.unit.compat import unittest
+from ansible.errors import AnsibleError
+from ansible.playbook.play_context import PlayContext
+from ansible.plugins.loader import connection_loader
+
+
+class TestDockerConnectionClass(unittest.TestCase):
+
+ def setUp(self):
+ self.play_context = PlayContext()
+ self.play_context.prompt = (
+ '[sudo via ansible, key=ouzmdnewuhucvuaabtjmweasarviygqq] password: '
+ )
+ self.in_stream = StringIO()
+ with mock.patch('ansible_collections.community.docker.plugins.connection.docker.get_bin_path', return_value='docker'):
+ self.dc = connection_loader.get('community.docker.docker', self.play_context, self.in_stream)
+
+ def tearDown(self):
+ pass
+
+ @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._old_docker_version',
+ return_value=('false', 'garbage', '', 1))
+ @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._new_docker_version',
+ return_value=('docker version', '1.2.3', '', 0))
+ def test_docker_connection_module_too_old(self, mock_new_docker_verison, mock_old_docker_version):
+ self.dc._version = None
+ self.dc.remote_user = 'foo'
+ self.assertRaisesRegexp(AnsibleError, '^docker connection type requires docker 1.3 or higher$', self.dc._get_actual_user)
+
+ @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._old_docker_version',
+ return_value=('false', 'garbage', '', 1))
+ @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._new_docker_version',
+ return_value=('docker version', '1.7.0', '', 0))
+ def test_docker_connection_module(self, mock_new_docker_verison, mock_old_docker_version):
+ self.dc._version = None
+ version = self.dc.docker_version
+
+ # old version and new version fail
+ @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._old_docker_version',
+ return_value=('false', 'garbage', '', 1))
+ @mock.patch('ansible_collections.community.docker.plugins.connection.docker.Connection._new_docker_version',
+ return_value=('false', 'garbage', '', 1))
+ def test_docker_connection_module_wrong_cmd(self, mock_new_docker_version, mock_old_docker_version):
+ self.dc._version = None
+ self.dc.remote_user = 'foo'
+ self.assertRaisesRegexp(AnsibleError, '^Docker version check (.*?) failed: ', self.dc._get_actual_user)
diff --git a/ansible_collections/community/docker/tests/unit/plugins/inventory/test_docker_containers.py b/ansible_collections/community/docker/tests/unit/plugins/inventory/test_docker_containers.py
new file mode 100644
index 00000000..ea16c0d9
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/inventory/test_docker_containers.py
@@ -0,0 +1,214 @@
+# Copyright (c), Felix Fontein <felix@fontein.de>, 2020
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import pytest
+
+from ansible.inventory.data import InventoryData
+
+from ansible_collections.community.docker.plugins.inventory.docker_containers import InventoryModule
+
+
+@pytest.fixture(scope="module")
+def inventory():
+ r = InventoryModule()
+ r.inventory = InventoryData()
+ return r
+
+
+LOVING_THARP = {
+ 'Id': '7bd547963679e3209cafd52aff21840b755c96fd37abcd7a6e19da8da6a7f49a',
+ 'Name': '/loving_tharp',
+ 'Image': 'sha256:349f492ff18add678364a62a67ce9a13487f14293ae0af1baf02398aa432f385',
+ 'State': {
+ 'Running': True,
+ },
+ 'Config': {
+ 'Image': 'quay.io/ansible/ubuntu1804-test-container:1.21.0',
+ },
+}
+
+
+LOVING_THARP_STACK = {
+ 'Id': '7bd547963679e3209cafd52aff21840b755c96fd37abcd7a6e19da8da6a7f49a',
+ 'Name': '/loving_tharp',
+ 'Image': 'sha256:349f492ff18add678364a62a67ce9a13487f14293ae0af1baf02398aa432f385',
+ 'State': {
+ 'Running': True,
+ },
+ 'Config': {
+ 'Image': 'quay.io/ansible/ubuntu1804-test-container:1.21.0',
+ 'Labels': {
+ 'com.docker.stack.namespace': 'my_stack',
+ },
+ },
+ 'NetworkSettings': {
+ 'Ports': {
+ '22/tcp': [
+ {
+ 'HostIp': '0.0.0.0',
+ 'HostPort': '32802'
+ }
+ ],
+ },
+ },
+}
+
+
+LOVING_THARP_SERVICE = {
+ 'Id': '7bd547963679e3209cafd52aff21840b755c96fd37abcd7a6e19da8da6a7f49a',
+ 'Name': '/loving_tharp',
+ 'Image': 'sha256:349f492ff18add678364a62a67ce9a13487f14293ae0af1baf02398aa432f385',
+ 'State': {
+ 'Running': True,
+ },
+ 'Config': {
+ 'Image': 'quay.io/ansible/ubuntu1804-test-container:1.21.0',
+ 'Labels': {
+ 'com.docker.swarm.service.name': 'my_service',
+ },
+ },
+}
+
+
+def create_get_option(options, default=False):
+ def get_option(option):
+ if option in options:
+ return options[option]
+ return default
+
+ return get_option
+
+
+class FakeClient(object):
+ def __init__(self, *hosts):
+ self.get_results = {}
+ list_reply = []
+ for host in hosts:
+ list_reply.append({
+ 'Id': host['Id'],
+ 'Names': [host['Name']] if host['Name'] else [],
+ 'Image': host['Config']['Image'],
+ 'ImageId': host['Image'],
+ })
+ self.get_results['/containers/{0}/json'.format(host['Name'])] = host
+ self.get_results['/containers/{0}/json'.format(host['Id'])] = host
+ self.get_results['/containers/json'] = list_reply
+
+ def get_json(self, url, *param, **kwargs):
+ url = url.format(*param)
+ return self.get_results[url]
+
+
+def test_populate(inventory, mocker):
+ client = FakeClient(LOVING_THARP)
+
+ inventory.get_option = mocker.MagicMock(side_effect=create_get_option({
+ 'verbose_output': True,
+ 'connection_type': 'docker-api',
+ 'add_legacy_groups': False,
+ 'compose': {},
+ 'groups': {},
+ 'keyed_groups': {},
+ }))
+ inventory._populate(client)
+
+ host_1 = inventory.inventory.get_host('loving_tharp')
+ host_1_vars = host_1.get_vars()
+
+ assert host_1_vars['ansible_host'] == 'loving_tharp'
+ assert host_1_vars['ansible_connection'] == 'community.docker.docker_api'
+ assert 'ansible_ssh_host' not in host_1_vars
+ assert 'ansible_ssh_port' not in host_1_vars
+ assert 'docker_state' in host_1_vars
+ assert 'docker_config' in host_1_vars
+ assert 'docker_image' in host_1_vars
+
+ assert len(inventory.inventory.groups['ungrouped'].hosts) == 0
+ assert len(inventory.inventory.groups['all'].hosts) == 0
+ assert len(inventory.inventory.groups) == 2
+ assert len(inventory.inventory.hosts) == 1
+
+
+def test_populate_service(inventory, mocker):
+ client = FakeClient(LOVING_THARP_SERVICE)
+
+ inventory.get_option = mocker.MagicMock(side_effect=create_get_option({
+ 'verbose_output': False,
+ 'connection_type': 'docker-cli',
+ 'add_legacy_groups': True,
+ 'compose': {},
+ 'groups': {},
+ 'keyed_groups': {},
+ 'docker_host': 'unix://var/run/docker.sock',
+ }))
+ inventory._populate(client)
+
+ host_1 = inventory.inventory.get_host('loving_tharp')
+ host_1_vars = host_1.get_vars()
+
+ assert host_1_vars['ansible_host'] == 'loving_tharp'
+ assert host_1_vars['ansible_connection'] == 'community.docker.docker'
+ assert 'ansible_ssh_host' not in host_1_vars
+ assert 'ansible_ssh_port' not in host_1_vars
+ assert 'docker_state' not in host_1_vars
+ assert 'docker_config' not in host_1_vars
+ assert 'docker_image' not in host_1_vars
+
+ assert len(inventory.inventory.groups['ungrouped'].hosts) == 0
+ assert len(inventory.inventory.groups['all'].hosts) == 0
+ assert len(inventory.inventory.groups['7bd547963679e'].hosts) == 1
+ assert len(inventory.inventory.groups['7bd547963679e3209cafd52aff21840b755c96fd37abcd7a6e19da8da6a7f49a'].hosts) == 1
+ assert len(inventory.inventory.groups['image_quay.io/ansible/ubuntu1804-test-container:1.21.0'].hosts) == 1
+ assert len(inventory.inventory.groups['loving_tharp'].hosts) == 1
+ assert len(inventory.inventory.groups['running'].hosts) == 1
+ assert len(inventory.inventory.groups['stopped'].hosts) == 0
+ assert len(inventory.inventory.groups['service_my_service'].hosts) == 1
+ assert len(inventory.inventory.groups['unix://var/run/docker.sock'].hosts) == 1
+ assert len(inventory.inventory.groups) == 10
+ assert len(inventory.inventory.hosts) == 1
+
+
+def test_populate_stack(inventory, mocker):
+ client = FakeClient(LOVING_THARP_STACK)
+
+ inventory.get_option = mocker.MagicMock(side_effect=create_get_option({
+ 'verbose_output': False,
+ 'connection_type': 'ssh',
+ 'add_legacy_groups': True,
+ 'compose': {},
+ 'groups': {},
+ 'keyed_groups': {},
+ 'docker_host': 'unix://var/run/docker.sock',
+ 'default_ip': '127.0.0.1',
+ 'private_ssh_port': 22,
+ }))
+ inventory._populate(client)
+
+ host_1 = inventory.inventory.get_host('loving_tharp')
+ host_1_vars = host_1.get_vars()
+
+ assert host_1_vars['ansible_ssh_host'] == '127.0.0.1'
+ assert host_1_vars['ansible_ssh_port'] == '32802'
+ assert 'ansible_host' not in host_1_vars
+ assert 'ansible_connection' not in host_1_vars
+ assert 'docker_state' not in host_1_vars
+ assert 'docker_config' not in host_1_vars
+ assert 'docker_image' not in host_1_vars
+
+ assert len(inventory.inventory.groups['ungrouped'].hosts) == 0
+ assert len(inventory.inventory.groups['all'].hosts) == 0
+ assert len(inventory.inventory.groups['7bd547963679e'].hosts) == 1
+ assert len(inventory.inventory.groups['7bd547963679e3209cafd52aff21840b755c96fd37abcd7a6e19da8da6a7f49a'].hosts) == 1
+ assert len(inventory.inventory.groups['image_quay.io/ansible/ubuntu1804-test-container:1.21.0'].hosts) == 1
+ assert len(inventory.inventory.groups['loving_tharp'].hosts) == 1
+ assert len(inventory.inventory.groups['running'].hosts) == 1
+ assert len(inventory.inventory.groups['stopped'].hosts) == 0
+ assert len(inventory.inventory.groups['stack_my_stack'].hosts) == 1
+ assert len(inventory.inventory.groups['unix://var/run/docker.sock'].hosts) == 1
+ assert len(inventory.inventory.groups) == 10
+ assert len(inventory.inventory.hosts) == 1
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/api/test_client.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/api/test_client.py
new file mode 100644
index 00000000..ea003565
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/api/test_client.py
@@ -0,0 +1,702 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import datetime
+import io
+import json
+import os
+import re
+import shutil
+import socket
+import struct
+import tempfile
+import threading
+import time
+import unittest
+import sys
+
+from ansible.module_utils import six
+
+import pytest
+import requests
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from ansible_collections.community.docker.plugins.module_utils._api import constants, errors
+from ansible_collections.community.docker.plugins.module_utils._api.api.client import APIClient
+from ansible_collections.community.docker.plugins.module_utils._api.constants import DEFAULT_DOCKER_API_VERSION
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import convert_filters
+from requests.packages import urllib3
+
+from .. import fake_api
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+DEFAULT_TIMEOUT_SECONDS = constants.DEFAULT_TIMEOUT_SECONDS
+
+
+def response(status_code=200, content='', headers=None, reason=None, elapsed=0,
+ request=None, raw=None):
+ res = requests.Response()
+ res.status_code = status_code
+ if not isinstance(content, six.binary_type):
+ content = json.dumps(content).encode('ascii')
+ res._content = content
+ res.headers = requests.structures.CaseInsensitiveDict(headers or {})
+ res.reason = reason
+ res.elapsed = datetime.timedelta(elapsed)
+ res.request = request
+ res.raw = raw
+ return res
+
+
+def fake_resolve_authconfig(authconfig, registry=None, *args, **kwargs):
+ return None
+
+
+def fake_inspect_container(self, container, tty=False):
+ return fake_api.get_fake_inspect_container(tty=tty)[1]
+
+
+def fake_resp(method, url, *args, **kwargs):
+ key = None
+ if url in fake_api.fake_responses:
+ key = url
+ elif (url, method) in fake_api.fake_responses:
+ key = (url, method)
+ if not key:
+ raise Exception('{method} {url}'.format(method=method, url=url))
+ status_code, content = fake_api.fake_responses[key]()
+ return response(status_code=status_code, content=content)
+
+
+fake_request = mock.Mock(side_effect=fake_resp)
+
+
+def fake_get(self, url, *args, **kwargs):
+ return fake_request('GET', url, *args, **kwargs)
+
+
+def fake_post(self, url, *args, **kwargs):
+ return fake_request('POST', url, *args, **kwargs)
+
+
+def fake_put(self, url, *args, **kwargs):
+ return fake_request('PUT', url, *args, **kwargs)
+
+
+def fake_delete(self, url, *args, **kwargs):
+ return fake_request('DELETE', url, *args, **kwargs)
+
+
+def fake_read_from_socket(self, response, stream, tty=False, demux=False):
+ return six.binary_type()
+
+
+url_base = '{prefix}/'.format(prefix=fake_api.prefix)
+url_prefix = '{0}v{1}/'.format(
+ url_base,
+ constants.DEFAULT_DOCKER_API_VERSION)
+
+
+class BaseAPIClientTest(unittest.TestCase):
+ def setUp(self):
+ self.patcher = mock.patch.multiple(
+ 'ansible_collections.community.docker.plugins.module_utils._api.api.client.APIClient',
+ get=fake_get,
+ post=fake_post,
+ put=fake_put,
+ delete=fake_delete,
+ _read_from_socket=fake_read_from_socket
+ )
+ self.patcher.start()
+ self.client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
+
+ def tearDown(self):
+ self.client.close()
+ self.patcher.stop()
+
+ def base_create_payload(self, img='busybox', cmd=None):
+ if not cmd:
+ cmd = ['true']
+ return {"Tty": False, "Image": img, "Cmd": cmd,
+ "AttachStdin": False,
+ "AttachStderr": True, "AttachStdout": True,
+ "StdinOnce": False,
+ "OpenStdin": False, "NetworkDisabled": False,
+ }
+
+
+class DockerApiTest(BaseAPIClientTest):
+ def test_ctor(self):
+ with pytest.raises(errors.DockerException) as excinfo:
+ APIClient(version=1.12)
+
+ assert str(
+ excinfo.value
+ ) == 'Version parameter must be a string or None. Found float'
+
+ def test_url_valid_resource(self):
+ url = self.client._url('/hello/{0}/world', 'somename')
+ assert url == '{0}{1}'.format(url_prefix, 'hello/somename/world')
+
+ url = self.client._url(
+ '/hello/{0}/world/{1}', 'somename', 'someothername'
+ )
+ assert url == '{0}{1}'.format(
+ url_prefix, 'hello/somename/world/someothername'
+ )
+
+ url = self.client._url('/hello/{0}/world', 'some?name')
+ assert url == '{0}{1}'.format(url_prefix, 'hello/some%3Fname/world')
+
+ url = self.client._url("/images/{0}/push", "localhost:5000/image")
+ assert url == '{0}{1}'.format(
+ url_prefix, 'images/localhost:5000/image/push'
+ )
+
+ def test_url_invalid_resource(self):
+ with pytest.raises(ValueError):
+ self.client._url('/hello/{0}/world', ['sakuya', 'izayoi'])
+
+ def test_url_no_resource(self):
+ url = self.client._url('/simple')
+ assert url == '{0}{1}'.format(url_prefix, 'simple')
+
+ def test_url_unversioned_api(self):
+ url = self.client._url(
+ '/hello/{0}/world', 'somename', versioned_api=False
+ )
+ assert url == '{0}{1}'.format(url_base, 'hello/somename/world')
+
+ def test_version(self):
+ self.client.version()
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'version',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_version_no_api_version(self):
+ self.client.version(False)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_base + 'version',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_retrieve_server_version(self):
+ client = APIClient(version="auto")
+ assert isinstance(client._version, six.string_types)
+ assert not (client._version == "auto")
+ client.close()
+
+ def test_auto_retrieve_server_version(self):
+ version = self.client._retrieve_server_version()
+ assert isinstance(version, six.string_types)
+
+ def test_info(self):
+ self.client.info()
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'info',
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_search(self):
+ self.client.get_json('/images/search', params={'term': 'busybox'})
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'images/search',
+ params={'term': 'busybox'},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_login(self):
+ self.client.login('sakuya', 'izayoi')
+ args = fake_request.call_args
+ assert args[0][0] == 'POST'
+ assert args[0][1] == url_prefix + 'auth'
+ assert json.loads(args[1]['data']) == {
+ 'username': 'sakuya', 'password': 'izayoi'
+ }
+ assert args[1]['headers'] == {'Content-Type': 'application/json'}
+ assert self.client._auth_configs.auths['docker.io'] == {
+ 'email': None,
+ 'password': 'izayoi',
+ 'username': 'sakuya',
+ 'serveraddress': None,
+ }
+
+ def test_events(self):
+ self.client.events()
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'events',
+ params={'since': None, 'until': None, 'filters': None},
+ stream=True,
+ timeout=None
+ )
+
+ def test_events_with_since_until(self):
+ ts = 1356048000
+ now = datetime.datetime.utcfromtimestamp(ts)
+ since = now - datetime.timedelta(seconds=10)
+ until = now + datetime.timedelta(seconds=10)
+
+ self.client.events(since=since, until=until)
+
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'events',
+ params={
+ 'since': ts - 10,
+ 'until': ts + 10,
+ 'filters': None
+ },
+ stream=True,
+ timeout=None
+ )
+
+ def test_events_with_filters(self):
+ filters = {'event': ['die', 'stop'],
+ 'container': fake_api.FAKE_CONTAINER_ID}
+
+ self.client.events(filters=filters)
+
+ expected_filters = convert_filters(filters)
+ fake_request.assert_called_with(
+ 'GET',
+ url_prefix + 'events',
+ params={
+ 'since': None,
+ 'until': None,
+ 'filters': expected_filters
+ },
+ stream=True,
+ timeout=None
+ )
+
+ def _socket_path_for_client_session(self, client):
+ socket_adapter = client.get_adapter('http+docker://')
+ return socket_adapter.socket_path
+
+ def test_url_compatibility_unix(self):
+ c = APIClient(
+ base_url="unix://socket",
+ version=DEFAULT_DOCKER_API_VERSION)
+
+ assert self._socket_path_for_client_session(c) == '/socket'
+
+ def test_url_compatibility_unix_triple_slash(self):
+ c = APIClient(
+ base_url="unix:///socket",
+ version=DEFAULT_DOCKER_API_VERSION)
+
+ assert self._socket_path_for_client_session(c) == '/socket'
+
+ def test_url_compatibility_http_unix_triple_slash(self):
+ c = APIClient(
+ base_url="http+unix:///socket",
+ version=DEFAULT_DOCKER_API_VERSION)
+
+ assert self._socket_path_for_client_session(c) == '/socket'
+
+ def test_url_compatibility_http(self):
+ c = APIClient(
+ base_url="http://hostname:1234",
+ version=DEFAULT_DOCKER_API_VERSION)
+
+ assert c.base_url == "http://hostname:1234"
+
+ def test_url_compatibility_tcp(self):
+ c = APIClient(
+ base_url="tcp://hostname:1234",
+ version=DEFAULT_DOCKER_API_VERSION)
+
+ assert c.base_url == "http://hostname:1234"
+
+ def test_remove_link(self):
+ self.client.delete_call('/containers/{0}', '3cc2351ab11b', params={'v': False, 'link': True, 'force': False})
+
+ fake_request.assert_called_with(
+ 'DELETE',
+ url_prefix + 'containers/3cc2351ab11b',
+ params={'v': False, 'link': True, 'force': False},
+ timeout=DEFAULT_TIMEOUT_SECONDS
+ )
+
+ def test_stream_helper_decoding(self):
+ status_code, content = fake_api.fake_responses[url_prefix + 'events']()
+ content_str = json.dumps(content)
+ if six.PY3:
+ content_str = content_str.encode('utf-8')
+ body = io.BytesIO(content_str)
+
+ # mock a stream interface
+ raw_resp = urllib3.HTTPResponse(body=body)
+ setattr(raw_resp._fp, 'chunked', True)
+ setattr(raw_resp._fp, 'chunk_left', len(body.getvalue()) - 1)
+
+ # pass `decode=False` to the helper
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp))
+ assert result == content_str
+
+ # pass `decode=True` to the helper
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp, decode=True))
+ assert result == content
+
+ # non-chunked response, pass `decode=False` to the helper
+ setattr(raw_resp._fp, 'chunked', False)
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp))
+ assert result == content_str.decode('utf-8')
+
+ # non-chunked response, pass `decode=True` to the helper
+ raw_resp._fp.seek(0)
+ resp = response(status_code=status_code, content=content, raw=raw_resp)
+ result = next(self.client._stream_helper(resp, decode=True))
+ assert result == content
+
+
+class UnixSocketStreamTest(unittest.TestCase):
+ def setUp(self):
+ socket_dir = tempfile.mkdtemp()
+ self.build_context = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, socket_dir)
+ self.addCleanup(shutil.rmtree, self.build_context)
+ self.socket_file = os.path.join(socket_dir, 'test_sock.sock')
+ self.server_socket = self._setup_socket()
+ self.stop_server = False
+ server_thread = threading.Thread(target=self.run_server)
+ server_thread.daemon = True
+ server_thread.start()
+ self.response = None
+ self.request_handler = None
+ self.addCleanup(server_thread.join)
+ self.addCleanup(self.stop)
+
+ def stop(self):
+ self.stop_server = True
+
+ def _setup_socket(self):
+ server_sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ server_sock.bind(self.socket_file)
+ # Non-blocking mode so that we can shut the test down easily
+ server_sock.setblocking(0)
+ server_sock.listen(5)
+ return server_sock
+
+ def run_server(self):
+ try:
+ while not self.stop_server:
+ try:
+ connection, client_address = self.server_socket.accept()
+ except socket.error:
+ # Probably no connection to accept yet
+ time.sleep(0.01)
+ continue
+
+ connection.setblocking(1)
+ try:
+ self.request_handler(connection)
+ finally:
+ connection.close()
+ finally:
+ self.server_socket.close()
+
+ def early_response_sending_handler(self, connection):
+ data = b''
+ headers = None
+
+ connection.sendall(self.response)
+ while not headers:
+ data += connection.recv(2048)
+ parts = data.split(b'\r\n\r\n', 1)
+ if len(parts) == 2:
+ headers, data = parts
+
+ mo = re.search(r'Content-Length: ([0-9]+)', headers.decode())
+ assert mo
+ content_length = int(mo.group(1))
+
+ while True:
+ if len(data) >= content_length:
+ break
+
+ data += connection.recv(2048)
+
+ @pytest.mark.skipif(
+ constants.IS_WINDOWS_PLATFORM, reason='Unix only'
+ )
+ def test_early_stream_response(self):
+ self.request_handler = self.early_response_sending_handler
+ lines = []
+ for i in range(0, 50):
+ line = str(i).encode()
+ lines += [('%x' % len(line)).encode(), line]
+ lines.append(b'0')
+ lines.append(b'')
+
+ self.response = (
+ b'HTTP/1.1 200 OK\r\n'
+ b'Transfer-Encoding: chunked\r\n'
+ b'\r\n'
+ ) + b'\r\n'.join(lines)
+
+ with APIClient(
+ base_url="http+unix://" + self.socket_file,
+ version=DEFAULT_DOCKER_API_VERSION) as client:
+ for i in range(5):
+ try:
+ params = {
+ 't': None,
+ 'remote': None,
+ 'q': False,
+ 'nocache': False,
+ 'rm': False,
+ 'forcerm': False,
+ 'pull': False,
+ 'dockerfile': 'Dockerfile',
+ }
+ headers = {'Content-Type': 'application/tar'}
+ data = b'...'
+ response = client._post(client._url('/build'), params=params, headers=headers, data=data, stream=True)
+ stream = client._stream_helper(response, decode=False)
+ break
+ except requests.ConnectionError as e:
+ if i == 4:
+ raise e
+
+ assert list(stream) == [
+ str(i).encode() for i in range(50)
+ ]
+
+
@pytest.mark.skip(
    'This test requires starting a networking server and tries to access it. '
    'This does not work with network separation with Docker-based unit tests, '
    'but it does work with podman-based unit tests.'
)
class TCPSocketStreamTest(unittest.TestCase):
    # Exercises APIClient._read_from_socket() against a live HTTP server
    # that answers with 101 Upgrade and then writes a raw (optionally
    # multiplexed) byte stream, covering every stream/tty/demux combination.

    stdout_data = b'''
    Now, those children out there, they're jumping through the
    flames in the hope that the god of the fire will make them fruitful.
    Really, you can't blame them. After all, what girl would not prefer the
    child of a god to that of some acne-scarred artisan?
    '''
    stderr_data = b'''
    And what of the true God? To whose glory churches and monasteries have been
    built on these islands for generations past? Now shall what of Him?
    '''

    @classmethod
    def setup_class(cls):
        # Bind to an ephemeral port ('', 0); serve in a daemon thread so a
        # hung test cannot block interpreter shutdown.
        cls.server = six.moves.socketserver.ThreadingTCPServer(
            ('', 0), cls.get_handler_class())
        cls.thread = threading.Thread(target=cls.server.serve_forever)
        cls.thread.daemon = True
        cls.thread.start()
        cls.address = 'http://{0}:{1}'.format(
            socket.gethostname(), cls.server.server_address[1])

    @classmethod
    def teardown_class(cls):
        cls.server.shutdown()
        cls.server.server_close()
        cls.thread.join()

    @classmethod
    def get_handler_class(cls):
        stdout_data = cls.stdout_data
        stderr_data = cls.stderr_data

        class Handler(six.moves.BaseHTTPServer.BaseHTTPRequestHandler, object):
            def do_POST(self):
                # Mimic a daemon that upgrades the connection to a raw
                # stream; delay the payload slightly so the client really
                # has to wait on the socket.
                resp_data = self.get_resp_data()
                self.send_response(101)
                self.send_header(
                    'Content-Type', 'application/vnd.docker.raw-stream')
                self.send_header('Connection', 'Upgrade')
                self.send_header('Upgrade', 'tcp')
                self.end_headers()
                self.wfile.flush()
                time.sleep(0.2)
                self.wfile.write(resp_data)
                self.wfile.flush()

            def get_resp_data(self):
                # '/tty' returns the bytes unframed; '/no-tty' wraps stdout
                # and stderr in Docker stream-multiplexing frames.
                path = self.path.split('/')[-1]
                if path == 'tty':
                    return stdout_data + stderr_data
                elif path == 'no-tty':
                    data = b''
                    data += self.frame_header(1, stdout_data)
                    data += stdout_data
                    data += self.frame_header(2, stderr_data)
                    data += stderr_data
                    return data
                else:
                    raise Exception('Unknown path {path}'.format(path=path))

            @staticmethod
            def frame_header(stream, data):
                # 8-byte frame header: stream id, 3 padding bytes, then the
                # big-endian payload length.
                return struct.pack('>BxxxL', stream, len(data))

        return Handler

    def request(self, stream=None, tty=None, demux=None):
        # All three flags must be passed explicitly by each test.
        assert stream is not None and tty is not None and demux is not None
        with APIClient(
            base_url=self.address,
            version=DEFAULT_DOCKER_API_VERSION,
        ) as client:
            if tty:
                url = client._url('/tty')
            else:
                url = client._url('/no-tty')
            resp = client._post(url, stream=True)
            return client._read_from_socket(
                resp, stream=stream, tty=tty, demux=demux)

    def test_read_from_socket_tty(self):
        res = self.request(stream=True, tty=True, demux=False)
        assert next(res) == self.stdout_data + self.stderr_data
        with self.assertRaises(StopIteration):
            next(res)

    def test_read_from_socket_tty_demux(self):
        res = self.request(stream=True, tty=True, demux=True)
        assert next(res) == (self.stdout_data + self.stderr_data, None)
        with self.assertRaises(StopIteration):
            next(res)

    def test_read_from_socket_no_tty(self):
        res = self.request(stream=True, tty=False, demux=False)
        assert next(res) == self.stdout_data
        assert next(res) == self.stderr_data
        with self.assertRaises(StopIteration):
            next(res)

    def test_read_from_socket_no_tty_demux(self):
        res = self.request(stream=True, tty=False, demux=True)
        assert (self.stdout_data, None) == next(res)
        assert (None, self.stderr_data) == next(res)
        with self.assertRaises(StopIteration):
            next(res)

    def test_read_from_socket_no_stream_tty(self):
        res = self.request(stream=False, tty=True, demux=False)
        assert res == self.stdout_data + self.stderr_data

    def test_read_from_socket_no_stream_tty_demux(self):
        res = self.request(stream=False, tty=True, demux=True)
        assert res == (self.stdout_data + self.stderr_data, None)

    def test_read_from_socket_no_stream_no_tty(self):
        res = self.request(stream=False, tty=False, demux=False)
        # Bug fix: this comparison previously lacked `assert` and was a
        # no-op expression, so the test verified nothing.
        assert res == self.stdout_data + self.stderr_data

    def test_read_from_socket_no_stream_no_tty_demux(self):
        res = self.request(stream=False, tty=False, demux=True)
        assert res == (self.stdout_data, self.stderr_data)
+
+
class UserAgentTest(unittest.TestCase):
    # Verifies the User-Agent header that APIClient attaches to requests.

    def setUp(self):
        # Intercept APIClient.send so no real HTTP request is ever made;
        # the canned reply answers the GET /version call.
        canned_response = fake_resp("GET", "%s/version" % fake_api.prefix)
        self.patcher = mock.patch.object(
            APIClient, 'send', return_value=canned_response)
        self.mock_send = self.patcher.start()

    def tearDown(self):
        self.patcher.stop()

    def _sent_user_agent(self):
        # Pull the User-Agent header off the (single) captured request.
        prepared_request = self.mock_send.call_args[0][0]
        return prepared_request.headers['User-Agent']

    def test_default_user_agent(self):
        client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
        client.version()

        assert self.mock_send.call_count == 1
        assert self._sent_user_agent() == 'ansible-community.docker'

    def test_custom_user_agent(self):
        client = APIClient(
            user_agent='foo/bar',
            version=DEFAULT_DOCKER_API_VERSION)
        client.version()

        assert self.mock_send.call_count == 1
        assert self._sent_user_agent() == 'foo/bar'
+
+
class DisableSocketTest(unittest.TestCase):
    # Tests for APIClient._disable_socket_timeout(): the observable contract
    # here is that the socket's timeout is set to None, while an inner
    # non-blocking `_sock` (timeout 0.0) is left untouched.

    class DummySocket:
        # Minimal socket stand-in that only records a timeout value.
        def __init__(self, timeout=60):
            self.timeout = timeout

        def settimeout(self, timeout):
            self.timeout = timeout

        def gettimeout(self):
            return self.timeout

    def setUp(self):
        self.client = APIClient(version=DEFAULT_DOCKER_API_VERSION)

    def test_disable_socket_timeout(self):
        """Test that the timeout is disabled on a generic socket object."""
        socket = self.DummySocket()

        self.client._disable_socket_timeout(socket)

        assert socket.timeout is None

    def test_disable_socket_timeout2(self):
        """Test that the timeouts are disabled on a generic socket object
        and its _sock object if present."""
        socket = self.DummySocket()
        socket._sock = self.DummySocket()

        self.client._disable_socket_timeout(socket)

        assert socket.timeout is None
        assert socket._sock.timeout is None

    def test_disable_socket_timout_non_blocking(self):
        """Test that a non-blocking socket does not get set to blocking."""
        # NOTE(review): 'timout' in the method name is a typo; kept as-is so
        # the test id stays stable for external tooling/selection.
        # A timeout of 0.0 means non-blocking, which must be preserved.
        socket = self.DummySocket()
        socket._sock = self.DummySocket(0.0)

        self.client._disable_socket_timeout(socket)

        assert socket.timeout is None
        assert socket._sock.timeout == 0.0
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/fake_api.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/fake_api.py
new file mode 100644
index 00000000..b794ff3d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/fake_api.py
@@ -0,0 +1,668 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible_collections.community.docker.plugins.module_utils._api import constants
+
+from . import fake_stat
+
+CURRENT_VERSION = 'v{api_version}'.format(api_version=constants.DEFAULT_DOCKER_API_VERSION)
+
+FAKE_CONTAINER_ID = '3cc2351ab11b'
+FAKE_IMAGE_ID = 'e9aa60c60128'
+FAKE_EXEC_ID = 'd5d177f121dc'
+FAKE_NETWORK_ID = '33fb6a3462b8'
+FAKE_IMAGE_NAME = 'test_image'
+FAKE_TARBALL_PATH = '/path/to/tarball'
+FAKE_REPO_NAME = 'repo'
+FAKE_TAG_NAME = 'tag'
+FAKE_FILE_NAME = 'file'
+FAKE_URL = 'myurl'
+FAKE_PATH = '/path'
+FAKE_VOLUME_NAME = 'perfectcherryblossom'
+FAKE_NODE_ID = '24ifsmvkjbyhk'
+FAKE_SECRET_ID = 'epdyrw4tsi03xy3deu8g8ly6o'
+FAKE_SECRET_NAME = 'super_secret'
+
+# Each method is prefixed with HTTP method (get, post...)
+# for clarity and readability
+
+
def get_fake_version():
    """Fake response for ``GET /version``: engine metadata (Docker 18.01.0-ce)."""
    engine_details = {
        'ApiVersion': '1.35',
        'Arch': 'amd64',
        'BuildTime': '2018-01-10T20:09:37.000000000+00:00',
        'Experimental': 'false',
        'GitCommit': '03596f5',
        'GoVersion': 'go1.9.2',
        'KernelVersion': '4.4.0-112-generic',
        'MinAPIVersion': '1.12',
        'Os': 'linux'
    }
    body = {
        'ApiVersion': '1.35',
        'Arch': 'amd64',
        'BuildTime': '2018-01-10T20:09:37.000000000+00:00',
        'Components': [{
            'Details': engine_details,
            'Name': 'Engine',
            'Version': '18.01.0-ce'
        }],
        'GitCommit': '03596f5',
        'GoVersion': 'go1.9.2',
        'KernelVersion': '4.4.0-112-generic',
        'MinAPIVersion': '1.12',
        'Os': 'linux',
        'Platform': {'Name': ''},
        'Version': '18.01.0-ce'
    }
    return 200, body
+
+
def get_fake_info():
    """Fake response for ``GET /info``: a minimal daemon summary."""
    return 200, {
        'Containers': 1,
        'Images': 1,
        'Debug': False,
        'MemoryLimit': False,
        'SwapLimit': False,
        'IPv4Forwarding': True,
    }
+
+
def post_fake_auth():
    """Fake response for ``POST /auth``: a successful registry login."""
    body = {
        'Status': 'Login Succeeded',
        'IdentityToken': '9cbaf023786cd7',
    }
    return 200, body
+
+
def get_fake_ping():
    """Fake response for ``GET /_ping``: plain-text "OK"."""
    status_code = 200
    response = "OK"
    return status_code, response
+
+
def get_fake_search():
    """Fake response for ``GET /images/search``: one hit for 'busybox'."""
    hit = {'Name': 'busybox', 'Description': 'Fake Description'}
    return 200, [hit]
+
+
+def get_fake_images():
+ status_code = 200
+ response = [{
+ 'Id': FAKE_IMAGE_ID,
+ 'Created': '2 days ago',
+ 'Repository': 'busybox',
+ 'RepoTags': ['busybox:latest', 'busybox:1.0'],
+ }]
+ return status_code, response
+
+
def get_fake_image_history():
    """Fake response for ``GET /images/<name>/history``: two layers."""
    layers = [
        {"Id": "b750fe79269d", "Created": 1364102658, "CreatedBy": "/bin/bash"},
        {"Id": "27cf78414709", "Created": 1364068391, "CreatedBy": ""},
    ]
    return 200, layers
+
+
+def post_fake_import_image():
+ status_code = 200
+ response = 'Import messages...'
+
+ return status_code, response
+
+
+def get_fake_containers():
+ status_code = 200
+ response = [{
+ 'Id': FAKE_CONTAINER_ID,
+ 'Image': 'busybox:latest',
+ 'Created': '2 days ago',
+ 'Command': 'true',
+ 'Status': 'fake status'
+ }]
+ return status_code, response
+
+
+def post_fake_start_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_resize_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_create_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def get_fake_inspect_container(tty=False):
+ status_code = 200
+ response = {
+ 'Id': FAKE_CONTAINER_ID,
+ 'Config': {'Labels': {'foo': 'bar'}, 'Privileged': True, 'Tty': tty},
+ 'ID': FAKE_CONTAINER_ID,
+ 'Image': 'busybox:latest',
+ 'Name': 'foobar',
+ "State": {
+ "Status": "running",
+ "Running": True,
+ "Pid": 0,
+ "ExitCode": 0,
+ "StartedAt": "2013-09-25T14:01:18.869545111+02:00",
+ "Ghost": False
+ },
+ "HostConfig": {
+ "LogConfig": {
+ "Type": "json-file",
+ "Config": {}
+ },
+ },
+ "MacAddress": "02:42:ac:11:00:0a"
+ }
+ return status_code, response
+
+
+def get_fake_inspect_image():
+ status_code = 200
+ response = {
+ 'Id': FAKE_IMAGE_ID,
+ 'Parent': "27cf784147099545",
+ 'Created': "2013-03-23T22:24:18.818426-07:00",
+ 'Container': FAKE_CONTAINER_ID,
+ 'Config': {'Labels': {'bar': 'foo'}},
+ 'ContainerConfig':
+ {
+ "Hostname": "",
+ "User": "",
+ "Memory": 0,
+ "MemorySwap": 0,
+ "AttachStdin": False,
+ "AttachStdout": False,
+ "AttachStderr": False,
+ "PortSpecs": "",
+ "Tty": True,
+ "OpenStdin": True,
+ "StdinOnce": False,
+ "Env": "",
+ "Cmd": ["/bin/bash"],
+ "Dns": "",
+ "Image": "base",
+ "Volumes": "",
+ "VolumesFrom": "",
+ "WorkingDir": ""
+ },
+ 'Size': 6823592
+ }
+ return status_code, response
+
+
+def get_fake_insert_image():
+ status_code = 200
+ response = {'StatusCode': 0}
+ return status_code, response
+
+
+def get_fake_wait():
+ status_code = 200
+ response = {'StatusCode': 0}
+ return status_code, response
+
+
def get_fake_logs():
    """Fake response for ``GET /containers/<id>/logs``.

    The payload is a Docker stream-multiplexed byte string: each frame is an
    8-byte header (stream id, padding, big-endian length) plus the payload.
    """
    frames = (
        b'\x01\x00\x00\x00\x00\x00\x00\x00',
        b'\x02\x00\x00\x00\x00\x00\x00\x00',
        b'\x01\x00\x00\x00\x00\x00\x00\x11Flowering Nights\n',
        b'\x01\x00\x00\x00\x00\x00\x00\x10(Sakuya Iyazoi)\n',
    )
    return 200, b''.join(frames)
+
+
+def get_fake_diff():
+ status_code = 200
+ response = [{'Path': '/test', 'Kind': 1}]
+ return status_code, response
+
+
+def get_fake_events():
+ status_code = 200
+ response = [{'status': 'stop', 'id': FAKE_CONTAINER_ID,
+ 'from': FAKE_IMAGE_ID, 'time': 1423247867}]
+ return status_code, response
+
+
+def get_fake_export():
+ status_code = 200
+ response = 'Byte Stream....'
+ return status_code, response
+
+
+def post_fake_exec_create():
+ status_code = 200
+ response = {'Id': FAKE_EXEC_ID}
+ return status_code, response
+
+
def post_fake_exec_start():
    """Fake response for ``POST /exec/<id>/start``: a multiplexed stdout stream."""
    frames = (
        b'\x01\x00\x00\x00\x00\x00\x00\x11bin\nboot\ndev\netc\n',
        b'\x01\x00\x00\x00\x00\x00\x00\x12lib\nmnt\nproc\nroot\n',
        b'\x01\x00\x00\x00\x00\x00\x00\x0csbin\nusr\nvar\n',
    )
    return 200, b''.join(frames)
+
+
+def post_fake_exec_resize():
+ status_code = 201
+ return status_code, ''
+
+
+def get_fake_exec_inspect():
+ return 200, {
+ 'OpenStderr': True,
+ 'OpenStdout': True,
+ 'Container': get_fake_inspect_container()[1],
+ 'Running': False,
+ 'ProcessConfig': {
+ 'arguments': ['hello world'],
+ 'tty': False,
+ 'entrypoint': 'echo',
+ 'privileged': False,
+ 'user': ''
+ },
+ 'ExitCode': 0,
+ 'ID': FAKE_EXEC_ID,
+ 'OpenStdin': False
+ }
+
+
+def post_fake_stop_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_kill_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_pause_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_unpause_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_restart_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_rename_container():
+ status_code = 204
+ return status_code, None
+
+
+def delete_fake_remove_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_image_create():
+ status_code = 200
+ response = {'Id': FAKE_IMAGE_ID}
+ return status_code, response
+
+
+def delete_fake_remove_image():
+ status_code = 200
+ response = {'Id': FAKE_IMAGE_ID}
+ return status_code, response
+
+
+def get_fake_get_image():
+ status_code = 200
+ response = 'Byte Stream....'
+ return status_code, response
+
+
+def post_fake_load_image():
+ status_code = 200
+ response = {'Id': FAKE_IMAGE_ID}
+ return status_code, response
+
+
+def post_fake_commit():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_push():
+ status_code = 200
+ response = {'Id': FAKE_IMAGE_ID}
+ return status_code, response
+
+
+def post_fake_build_container():
+ status_code = 200
+ response = {'Id': FAKE_CONTAINER_ID}
+ return status_code, response
+
+
+def post_fake_tag_image():
+ status_code = 200
+ response = {'Id': FAKE_IMAGE_ID}
+ return status_code, response
+
+
+def get_fake_stats():
+ status_code = 200
+ response = fake_stat.OBJ
+ return status_code, response
+
+
def get_fake_top():
    """Fake response for ``GET /containers/<id>/top``: one 'sleep 60' process."""
    titles = ['UID', 'PID', 'PPID', 'C', 'STIME', 'TTY', 'TIME', 'CMD']
    process = [
        'root', '26501', '6907', '0', '10:32', 'pts/55', '00:00:00', 'sleep 60',
    ]
    return 200, {'Processes': [process], 'Titles': titles}
+
+
def get_fake_volume_list():
    """Fake response for ``GET /volumes``: two local volumes."""
    def local_volume(name):
        # Both fixture volumes share the local driver and mount layout.
        return {
            'Name': name,
            'Driver': 'local',
            'Mountpoint': '/var/lib/docker/volumes/' + name,
            'Scope': 'local',
        }

    volumes = [
        local_volume('perfectcherryblossom'),
        local_volume('subterraneananimism'),
    ]
    return 200, {'Volumes': volumes}
+
+
def get_fake_volume():
    """Fake response for ``GET /volumes/<name>``: inspect data for one volume."""
    volume = {
        'Name': 'perfectcherryblossom',
        'Driver': 'local',
        'Mountpoint': '/var/lib/docker/volumes/perfectcherryblossom',
        'Labels': {'com.example.some-label': 'some-value'},
        'Scope': 'local',
    }
    return 200, volume
+
+
def fake_remove_volume():
    """Fake response for ``DELETE /volumes/<name>``: 204 with no body."""
    status_code = 204
    response = None
    return status_code, response
+
+
+def post_fake_update_container():
+ return 200, {'Warnings': []}
+
+
+def post_fake_update_node():
+ return 200, None
+
+
+def post_fake_join_swarm():
+ return 200, None
+
+
+def get_fake_network_list():
+ return 200, [{
+ "Name": "bridge",
+ "Id": FAKE_NETWORK_ID,
+ "Scope": "local",
+ "Driver": "bridge",
+ "EnableIPv6": False,
+ "Internal": False,
+ "IPAM": {
+ "Driver": "default",
+ "Config": [
+ {
+ "Subnet": "172.17.0.0/16"
+ }
+ ]
+ },
+ "Containers": {
+ FAKE_CONTAINER_ID: {
+ "EndpointID": "ed2419a97c1d99",
+ "MacAddress": "02:42:ac:11:00:02",
+ "IPv4Address": "172.17.0.2/16",
+ "IPv6Address": ""
+ }
+ },
+ "Options": {
+ "com.docker.network.bridge.default_bridge": "true",
+ "com.docker.network.bridge.enable_icc": "true",
+ "com.docker.network.bridge.enable_ip_masquerade": "true",
+ "com.docker.network.bridge.host_binding_ipv4": "0.0.0.0",
+ "com.docker.network.bridge.name": "docker0",
+ "com.docker.network.driver.mtu": "1500"
+ }
+ }]
+
+
+def get_fake_network():
+ return 200, get_fake_network_list()[1][0]
+
+
+def post_fake_network():
+ return 201, {"Id": FAKE_NETWORK_ID, "Warnings": []}
+
+
+def delete_fake_network():
+ return 204, None
+
+
+def post_fake_network_connect():
+ return 200, None
+
+
+def post_fake_network_disconnect():
+ return 200, None
+
+
+def post_fake_secret():
+ status_code = 200
+ response = {'ID': FAKE_SECRET_ID}
+ return status_code, response
+
+
+# Maps real api url to fake response callback
+prefix = 'http+docker://localhost'
+if constants.IS_WINDOWS_PLATFORM:
+ prefix = 'http+docker://localnpipe'
+
+fake_responses = {
+ '{prefix}/version'.format(prefix=prefix):
+ get_fake_version,
+ '{prefix}/{CURRENT_VERSION}/version'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_version,
+ '{prefix}/{CURRENT_VERSION}/info'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_info,
+ '{prefix}/{CURRENT_VERSION}/auth'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_auth,
+ '{prefix}/{CURRENT_VERSION}/_ping'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_ping,
+ '{prefix}/{CURRENT_VERSION}/images/search'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_search,
+ '{prefix}/{CURRENT_VERSION}/images/json'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_images,
+ '{prefix}/{CURRENT_VERSION}/images/test_image/history'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_image_history,
+ '{prefix}/{CURRENT_VERSION}/images/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_import_image,
+ '{prefix}/{CURRENT_VERSION}/containers/json'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_containers,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/start'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_start_container,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/resize'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_resize_container,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/json'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_inspect_container,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/rename'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_rename_container,
+ '{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/tag'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_tag_image,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/wait'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_wait,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/logs'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_logs,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/changes'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_diff,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/export'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_export,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/update'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_update_container,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/exec'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_exec_create,
+ '{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/start'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_exec_start,
+ '{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/json'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_exec_inspect,
+ '{prefix}/{CURRENT_VERSION}/exec/d5d177f121dc/resize'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_exec_resize,
+
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stats'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_stats,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/top'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_top,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/stop'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_stop_container,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/kill'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_kill_container,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/pause'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_pause_container,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/unpause'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_unpause_container,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b/restart'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_restart_container,
+ '{prefix}/{CURRENT_VERSION}/containers/3cc2351ab11b'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ delete_fake_remove_container,
+ '{prefix}/{CURRENT_VERSION}/images/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_image_create,
+ '{prefix}/{CURRENT_VERSION}/images/e9aa60c60128'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ delete_fake_remove_image,
+ '{prefix}/{CURRENT_VERSION}/images/e9aa60c60128/get'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_get_image,
+ '{prefix}/{CURRENT_VERSION}/images/load'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_load_image,
+ '{prefix}/{CURRENT_VERSION}/images/test_image/json'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_inspect_image,
+ '{prefix}/{CURRENT_VERSION}/images/test_image/insert'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_insert_image,
+ '{prefix}/{CURRENT_VERSION}/images/test_image/push'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_push,
+ '{prefix}/{CURRENT_VERSION}/commit'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_commit,
+ '{prefix}/{CURRENT_VERSION}/containers/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_create_container,
+ '{prefix}/{CURRENT_VERSION}/build'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_build_container,
+ '{prefix}/{CURRENT_VERSION}/events'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ get_fake_events,
+ ('{prefix}/{CURRENT_VERSION}/volumes'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION), 'GET'):
+ get_fake_volume_list,
+ ('{prefix}/{CURRENT_VERSION}/volumes/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION), 'POST'):
+ get_fake_volume,
+ ('{1}/{0}/volumes/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
+ ), 'GET'):
+ get_fake_volume,
+ ('{1}/{0}/volumes/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_VOLUME_NAME
+ ), 'DELETE'):
+ fake_remove_volume,
+ ('{1}/{0}/nodes/{2}/update?version=1'.format(
+ CURRENT_VERSION, prefix, FAKE_NODE_ID
+ ), 'POST'):
+ post_fake_update_node,
+ ('{prefix}/{CURRENT_VERSION}/swarm/join'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION), 'POST'):
+ post_fake_join_swarm,
+ ('{prefix}/{CURRENT_VERSION}/networks'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION), 'GET'):
+ get_fake_network_list,
+ ('{prefix}/{CURRENT_VERSION}/networks/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION), 'POST'):
+ post_fake_network,
+ ('{1}/{0}/networks/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'GET'):
+ get_fake_network,
+ ('{1}/{0}/networks/{2}'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'DELETE'):
+ delete_fake_network,
+ ('{1}/{0}/networks/{2}/connect'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'POST'):
+ post_fake_network_connect,
+ ('{1}/{0}/networks/{2}/disconnect'.format(
+ CURRENT_VERSION, prefix, FAKE_NETWORK_ID
+ ), 'POST'):
+ post_fake_network_disconnect,
+ '{prefix}/{CURRENT_VERSION}/secrets/create'.format(prefix=prefix, CURRENT_VERSION=CURRENT_VERSION):
+ post_fake_secret,
+}
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/fake_stat.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/fake_stat.py
new file mode 100644
index 00000000..97547328
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/fake_stat.py
@@ -0,0 +1,145 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+OBJ = {
+ "read": "2015-02-11T19:20:46.667237763+02:00",
+ "network": {
+ "rx_bytes": 567224,
+ "rx_packets": 3773,
+ "rx_errors": 0,
+ "rx_dropped": 0,
+ "tx_bytes": 1176,
+ "tx_packets": 13,
+ "tx_errors": 0,
+ "tx_dropped": 0
+ },
+ "cpu_stats": {
+ "cpu_usage": {
+ "total_usage": 157260874053,
+ "percpu_usage": [
+ 52196306950,
+ 24118413549,
+ 53292684398,
+ 27653469156
+ ],
+ "usage_in_kernelmode": 37140000000,
+ "usage_in_usermode": 62140000000
+ },
+ "system_cpu_usage": 3.0881377e+14,
+ "throttling_data": {
+ "periods": 0,
+ "throttled_periods": 0,
+ "throttled_time": 0
+ }
+ },
+ "memory_stats": {
+ "usage": 179314688,
+ "max_usage": 258166784,
+ "stats": {
+ "active_anon": 90804224,
+ "active_file": 2195456,
+ "cache": 3096576,
+ "hierarchical_memory_limit": 1.844674407371e+19,
+ "inactive_anon": 85516288,
+ "inactive_file": 798720,
+ "mapped_file": 2646016,
+ "pgfault": 101034,
+ "pgmajfault": 1207,
+ "pgpgin": 115814,
+ "pgpgout": 75613,
+ "rss": 176218112,
+ "rss_huge": 12582912,
+ "total_active_anon": 90804224,
+ "total_active_file": 2195456,
+ "total_cache": 3096576,
+ "total_inactive_anon": 85516288,
+ "total_inactive_file": 798720,
+ "total_mapped_file": 2646016,
+ "total_pgfault": 101034,
+ "total_pgmajfault": 1207,
+ "total_pgpgin": 115814,
+ "total_pgpgout": 75613,
+ "total_rss": 176218112,
+ "total_rss_huge": 12582912,
+ "total_unevictable": 0,
+ "total_writeback": 0,
+ "unevictable": 0,
+ "writeback": 0
+ },
+ "failcnt": 0,
+ "limit": 8039038976
+ },
+ "blkio_stats": {
+ "io_service_bytes_recursive": [
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Read",
+ "value": 72843264
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Write",
+ "value": 4096
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Sync",
+ "value": 4096
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Async",
+ "value": 72843264
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Total",
+ "value": 72847360
+ }
+ ],
+ "io_serviced_recursive": [
+ {
+ "major": 8,
+ "minor": 0,
+ "op": "Read",
+ "value": 10581
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Write",
+ "value": 1
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Sync",
+ "value": 1
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Async",
+ "value": 10581
+ }, {
+ "major": 8,
+ "minor": 0,
+ "op": "Total",
+ "value": 10582
+ }
+ ],
+ "io_queue_recursive": [],
+ "io_service_time_recursive": [],
+ "io_wait_time_recursive": [],
+ "io_merged_recursive": [],
+ "io_time_recursive": [],
+ "sectors_recursive": []
+ }
+}
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/test_auth.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/test_auth.py
new file mode 100644
index 00000000..b3b5a351
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/test_auth.py
@@ -0,0 +1,819 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import json
+import os
+import os.path
+import random
+import shutil
+import tempfile
+import unittest
+import sys
+
+import pytest
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from ansible_collections.community.docker.plugins.module_utils._api import auth, errors
+from ansible_collections.community.docker.plugins.module_utils._api.credentials.errors import CredentialsNotFound
+from ansible_collections.community.docker.plugins.module_utils._api.credentials.store import Store
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class RegressionTest(unittest.TestCase):
+ def test_803_urlsafe_encode(self):
+ auth_data = {
+ 'username': 'root',
+ 'password': 'GR?XGR?XGR?XGR?X'
+ }
+ encoded = auth.encode_header(auth_data)
+ assert b'/' not in encoded
+ assert b'_' in encoded
+
+
+class ResolveRepositoryNameTest(unittest.TestCase):
+ def test_resolve_repository_name_hub_library_image(self):
+ assert auth.resolve_repository_name('image') == (
+ 'docker.io', 'image'
+ )
+
+ def test_resolve_repository_name_dotted_hub_library_image(self):
+ assert auth.resolve_repository_name('image.valid') == (
+ 'docker.io', 'image.valid'
+ )
+
+ def test_resolve_repository_name_hub_image(self):
+ assert auth.resolve_repository_name('username/image') == (
+ 'docker.io', 'username/image'
+ )
+
+ def test_explicit_hub_index_library_image(self):
+ assert auth.resolve_repository_name('docker.io/image') == (
+ 'docker.io', 'image'
+ )
+
+ def test_explicit_legacy_hub_index_library_image(self):
+ assert auth.resolve_repository_name('index.docker.io/image') == (
+ 'docker.io', 'image'
+ )
+
+ def test_resolve_repository_name_private_registry(self):
+ assert auth.resolve_repository_name('my.registry.net/image') == (
+ 'my.registry.net', 'image'
+ )
+
+ def test_resolve_repository_name_private_registry_with_port(self):
+ assert auth.resolve_repository_name('my.registry.net:5000/image') == (
+ 'my.registry.net:5000', 'image'
+ )
+
+ def test_resolve_repository_name_private_registry_with_username(self):
+ assert auth.resolve_repository_name(
+ 'my.registry.net/username/image'
+ ) == ('my.registry.net', 'username/image')
+
+ def test_resolve_repository_name_no_dots_but_port(self):
+ assert auth.resolve_repository_name('hostname:5000/image') == (
+ 'hostname:5000', 'image'
+ )
+
+ def test_resolve_repository_name_no_dots_but_port_and_username(self):
+ assert auth.resolve_repository_name(
+ 'hostname:5000/username/image'
+ ) == ('hostname:5000', 'username/image')
+
+ def test_resolve_repository_name_localhost(self):
+ assert auth.resolve_repository_name('localhost/image') == (
+ 'localhost', 'image'
+ )
+
+ def test_resolve_repository_name_localhost_with_username(self):
+ assert auth.resolve_repository_name('localhost/username/image') == (
+ 'localhost', 'username/image'
+ )
+
+ def test_invalid_index_name(self):
+ with pytest.raises(errors.InvalidRepository):
+ auth.resolve_repository_name('-gecko.com/image')
+
+
+def encode_auth(auth_info):
+ return base64.b64encode(
+ auth_info.get('username', '').encode('utf-8') + b':' +
+ auth_info.get('password', '').encode('utf-8'))
+
+
+class ResolveAuthTest(unittest.TestCase):
+ index_config = {'auth': encode_auth({'username': 'indexuser'})}
+ private_config = {'auth': encode_auth({'username': 'privateuser'})}
+ legacy_config = {'auth': encode_auth({'username': 'legacyauth'})}
+
+ auth_config = auth.AuthConfig({
+ 'auths': auth.parse_auth({
+ 'https://index.docker.io/v1/': index_config,
+ 'my.registry.net': private_config,
+ 'http://legacy.registry.url/v1/': legacy_config,
+ })
+ })
+
+ def test_resolve_authconfig_hostname_only(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'my.registry.net'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_no_protocol(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'my.registry.net/v1/'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_no_path(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_no_path_trailing_slash(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net/'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_no_path_wrong_secure_proto(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'https://my.registry.net'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_no_path_wrong_insecure_proto(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://index.docker.io'
+ )['username'] == 'indexuser'
+
+ def test_resolve_authconfig_path_wrong_proto(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'https://my.registry.net/v1/'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_default_registry(self):
+ assert auth.resolve_authconfig(
+ self.auth_config
+ )['username'] == 'indexuser'
+
+ def test_resolve_authconfig_default_explicit_none(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, None
+ )['username'] == 'indexuser'
+
+ def test_resolve_authconfig_fully_explicit(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'http://my.registry.net/v1/'
+ )['username'] == 'privateuser'
+
+ def test_resolve_authconfig_legacy_config(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'legacy.registry.url'
+ )['username'] == 'legacyauth'
+
+ def test_resolve_authconfig_no_match(self):
+ assert auth.resolve_authconfig(
+ self.auth_config, 'does.not.exist'
+ ) is None
+
+ def test_resolve_registry_and_auth_library_image(self):
+ image = 'image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
+
+ def test_resolve_registry_and_auth_hub_image(self):
+ image = 'username/image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
+
+ def test_resolve_registry_and_auth_explicit_hub(self):
+ image = 'docker.io/username/image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
+
+ def test_resolve_registry_and_auth_explicit_legacy_hub(self):
+ image = 'index.docker.io/username/image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'indexuser'
+
+ def test_resolve_registry_and_auth_private_registry(self):
+ image = 'my.registry.net/image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ )['username'] == 'privateuser'
+
+ def test_resolve_registry_and_auth_unauthenticated_registry(self):
+ image = 'other.registry.net/image'
+ assert auth.resolve_authconfig(
+ self.auth_config, auth.resolve_repository_name(image)[0]
+ ) is None
+
+ def test_resolve_auth_with_empty_credstore_and_auth_dict(self):
+ auth_config = auth.AuthConfig({
+ 'auths': auth.parse_auth({
+ 'https://index.docker.io/v1/': self.index_config,
+ }),
+ 'credsStore': 'blackbox'
+ })
+ with mock.patch(
+ 'ansible_collections.community.docker.plugins.module_utils._api.auth.AuthConfig._resolve_authconfig_credstore'
+ ) as m:
+ m.return_value = None
+ assert 'indexuser' == auth.resolve_authconfig(
+ auth_config, None
+ )['username']
+
+
+class LoadConfigTest(unittest.TestCase):
+ def test_load_config_no_file(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg = auth.load_config(folder)
+ assert cfg is not None
+
+ def test_load_legacy_config(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg_path = os.path.join(folder, '.dockercfg')
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ with open(cfg_path, 'w') as f:
+ f.write('auth = {auth}\n'.format(auth=auth_))
+ f.write('email = sakuya@scarlet.net')
+
+ cfg = auth.load_config(cfg_path)
+ assert auth.resolve_authconfig(cfg) is not None
+ assert cfg.auths[auth.INDEX_NAME] is not None
+ cfg = cfg.auths[auth.INDEX_NAME]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('Auth') is None
+
+ def test_load_json_config(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg_path = os.path.join(folder, '.dockercfg')
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ email = 'sakuya@scarlet.net'
+ with open(cfg_path, 'w') as f:
+ json.dump(
+ {auth.INDEX_URL: {'auth': auth_, 'email': email}}, f
+ )
+ cfg = auth.load_config(cfg_path)
+ assert auth.resolve_authconfig(cfg) is not None
+ assert cfg.auths[auth.INDEX_URL] is not None
+ cfg = cfg.auths[auth.INDEX_URL]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == email
+ assert cfg.get('Auth') is None
+
+ def test_load_modern_json_config(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg_path = os.path.join(folder, 'config.json')
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ email = 'sakuya@scarlet.net'
+ with open(cfg_path, 'w') as f:
+ json.dump({
+ 'auths': {
+ auth.INDEX_URL: {
+ 'auth': auth_, 'email': email
+ }
+ }
+ }, f)
+ cfg = auth.load_config(cfg_path)
+ assert auth.resolve_authconfig(cfg) is not None
+ assert cfg.auths[auth.INDEX_URL] is not None
+ cfg = cfg.auths[auth.INDEX_URL]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == email
+
+ def test_load_config_with_random_name(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder,
+ '.{0}.dockercfg'.format(
+ random.randrange(100000)))
+ registry = 'https://your.private.registry.io'
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ config = {
+ registry: {
+ 'auth': '{auth}'.format(auth=auth_),
+ 'email': 'sakuya@scarlet.net'
+ }
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ cfg = auth.load_config(dockercfg_path).auths
+ assert registry in cfg
+ assert cfg[registry] is not None
+ cfg = cfg[registry]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
+
+ def test_load_config_custom_config_env(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder, 'config.json')
+ registry = 'https://your.private.registry.io'
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ config = {
+ registry: {
+ 'auth': '{auth}'.format(auth=auth_),
+ 'email': 'sakuya@scarlet.net'
+ }
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
+ cfg = auth.load_config(None).auths
+ assert registry in cfg
+ assert cfg[registry] is not None
+ cfg = cfg[registry]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
+
+ def test_load_config_custom_config_env_with_auths(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder, 'config.json')
+ registry = 'https://your.private.registry.io'
+ auth_ = base64.b64encode(b'sakuya:izayoi').decode('ascii')
+ config = {
+ 'auths': {
+ registry: {
+ 'auth': '{auth}'.format(auth=auth_),
+ 'email': 'sakuya@scarlet.net'
+ }
+ }
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
+ cfg = auth.load_config(None)
+ assert registry in cfg.auths
+ cfg = cfg.auths[registry]
+ assert cfg['username'] == 'sakuya'
+ assert cfg['password'] == 'izayoi'
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
+
+ def test_load_config_custom_config_env_utf8(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder, 'config.json')
+ registry = 'https://your.private.registry.io'
+ auth_ = base64.b64encode(
+ b'sakuya\xc3\xa6:izayoi\xc3\xa6').decode('ascii')
+ config = {
+ 'auths': {
+ registry: {
+ 'auth': '{auth}'.format(auth=auth_),
+ 'email': 'sakuya@scarlet.net'
+ }
+ }
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
+ cfg = auth.load_config(None)
+ assert registry in cfg.auths
+ cfg = cfg.auths[registry]
+ assert cfg['username'] == b'sakuya\xc3\xa6'.decode('utf8')
+ assert cfg['password'] == b'izayoi\xc3\xa6'.decode('utf8')
+ assert cfg['email'] == 'sakuya@scarlet.net'
+ assert cfg.get('auth') is None
+
+ def test_load_config_unknown_keys(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config = {
+ 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ cfg = auth.load_config(dockercfg_path)
+ assert dict(cfg) == {'auths': {}}
+
+ def test_load_config_invalid_auth_dict(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config = {
+ 'auths': {
+ 'scarlet.net': {'sakuya': 'izayoi'}
+ }
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ cfg = auth.load_config(dockercfg_path)
+ assert dict(cfg) == {'auths': {'scarlet.net': {}}}
+
+ def test_load_config_identity_token(self):
+ folder = tempfile.mkdtemp()
+ registry = 'scarlet.net'
+ token = '1ce1cebb-503e-7043-11aa-7feb8bd4a1ce'
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ auth_entry = encode_auth({'username': 'sakuya'}).decode('ascii')
+ config = {
+ 'auths': {
+ registry: {
+ 'auth': auth_entry,
+ 'identitytoken': token
+ }
+ }
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config, f)
+
+ cfg = auth.load_config(dockercfg_path)
+ assert registry in cfg.auths
+ cfg = cfg.auths[registry]
+ assert 'IdentityToken' in cfg
+ assert cfg['IdentityToken'] == token
+
+
+class CredstoreTest(unittest.TestCase):
+ def setUp(self):
+ self.authconfig = auth.AuthConfig({'credsStore': 'default'})
+ self.default_store = InMemoryStore('default')
+ self.authconfig._stores['default'] = self.default_store
+ self.default_store.store(
+ 'https://gensokyo.jp/v2', 'sakuya', 'izayoi',
+ )
+ self.default_store.store(
+ 'https://default.com/v2', 'user', 'hunter2',
+ )
+
+ def test_get_credential_store(self):
+ auth_config = auth.AuthConfig({
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ 'credsStore': 'blackbox',
+ })
+
+ assert auth_config.get_credential_store('registry1.io') == 'truesecret'
+ assert auth_config.get_credential_store('registry2.io') == 'powerlock'
+ assert auth_config.get_credential_store('registry3.io') == 'blackbox'
+
+ def test_get_credential_store_no_default(self):
+ auth_config = auth.AuthConfig({
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ })
+ assert auth_config.get_credential_store('registry2.io') == 'powerlock'
+ assert auth_config.get_credential_store('registry3.io') is None
+
+ def test_get_credential_store_default_index(self):
+ auth_config = auth.AuthConfig({
+ 'credHelpers': {
+ 'https://index.docker.io/v1/': 'powerlock'
+ },
+ 'credsStore': 'truesecret'
+ })
+
+ assert auth_config.get_credential_store(None) == 'powerlock'
+ assert auth_config.get_credential_store('docker.io') == 'powerlock'
+ assert auth_config.get_credential_store('images.io') == 'truesecret'
+
+ def test_get_credential_store_with_plain_dict(self):
+ auth_config = {
+ 'credHelpers': {
+ 'registry1.io': 'truesecret',
+ 'registry2.io': 'powerlock'
+ },
+ 'credsStore': 'blackbox',
+ }
+
+ assert auth.get_credential_store(
+ auth_config, 'registry1.io'
+ ) == 'truesecret'
+ assert auth.get_credential_store(
+ auth_config, 'registry2.io'
+ ) == 'powerlock'
+ assert auth.get_credential_store(
+ auth_config, 'registry3.io'
+ ) == 'blackbox'
+
+ def test_get_all_credentials_credstore_only(self):
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_with_empty_credhelper(self):
+ self.authconfig['credHelpers'] = {
+ 'registry1.io': 'truesecret',
+ }
+ self.authconfig._stores['truesecret'] = InMemoryStore()
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'registry1.io': None,
+ }
+
+ def test_get_all_credentials_with_credhelpers_only(self):
+ del self.authconfig['credsStore']
+ assert self.authconfig.get_all_credentials() == {}
+
+ self.authconfig['credHelpers'] = {
+ 'https://gensokyo.jp/v2': 'default',
+ 'https://default.com/v2': 'default',
+ }
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_with_auths_entries(self):
+ self.authconfig.add_auth('registry1.io', {
+ 'ServerAddress': 'registry1.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ })
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'registry1.io': {
+ 'ServerAddress': 'registry1.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ },
+ }
+
+ def test_get_all_credentials_with_empty_auths_entry(self):
+ self.authconfig.add_auth('default.com', {})
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_credstore_overrides_auth_entry(self):
+ self.authconfig.add_auth('default.com', {
+ 'Username': 'shouldnotsee',
+ 'Password': 'thisentry',
+ 'ServerAddress': 'https://default.com/v2',
+ })
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_helpers_override_default(self):
+ self.authconfig['credHelpers'] = {
+ 'https://default.com/v2': 'truesecret',
+ }
+ truesecret = InMemoryStore('truesecret')
+ truesecret.store('https://default.com/v2', 'reimu', 'hakurei')
+ self.authconfig._stores['truesecret'] = truesecret
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ }
+
+ def test_get_all_credentials_3_sources(self):
+ self.authconfig['credHelpers'] = {
+ 'registry1.io': 'truesecret',
+ }
+ truesecret = InMemoryStore('truesecret')
+ truesecret.store('registry1.io', 'reimu', 'hakurei')
+ self.authconfig._stores['truesecret'] = truesecret
+ self.authconfig.add_auth('registry2.io', {
+ 'ServerAddress': 'registry2.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ })
+
+ assert self.authconfig.get_all_credentials() == {
+ 'https://gensokyo.jp/v2': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'gensokyo.jp': {
+ 'Username': 'sakuya',
+ 'Password': 'izayoi',
+ 'ServerAddress': 'https://gensokyo.jp/v2',
+ },
+ 'https://default.com/v2': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'default.com': {
+ 'Username': 'user',
+ 'Password': 'hunter2',
+ 'ServerAddress': 'https://default.com/v2',
+ },
+ 'registry1.io': {
+ 'ServerAddress': 'registry1.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ },
+ 'registry2.io': {
+ 'ServerAddress': 'registry2.io',
+ 'Username': 'reimu',
+ 'Password': 'hakurei',
+ }
+ }
+
+
+class InMemoryStore(Store):
+ def __init__(self, *args, **kwargs):
+ self.__store = {}
+
+ def get(self, server):
+ try:
+ return self.__store[server]
+ except KeyError:
+ raise CredentialsNotFound()
+
+ def store(self, server, username, secret):
+ self.__store[server] = {
+ 'ServerURL': server,
+ 'Username': username,
+ 'Secret': secret,
+ }
+
+ def list(self):
+ return dict(
+ (k, v['Username']) for k, v in self.__store.items()
+ )
+
+ def erase(self, server):
+ del self.__store[server]
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/test_errors.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/test_errors.py
new file mode 100644
index 00000000..2cc114ed
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/test_errors.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+import sys
+
+import pytest
+import requests
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import (
+ APIError, DockerException,
+ create_unexpected_kwargs_error,
+ create_api_error_from_http_exception,
+)
+
+
+class APIErrorTest(unittest.TestCase):
+ def test_api_error_is_caught_by_dockerexception(self):
+ try:
+ raise APIError("this should be caught by DockerException")
+ except DockerException:
+ pass
+
+ def test_status_code_200(self):
+ """The status_code property is present with 200 response."""
+ resp = requests.Response()
+ resp.status_code = 200
+ err = APIError('', response=resp)
+ assert err.status_code == 200
+
+ def test_status_code_400(self):
+ """The status_code property is present with 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.status_code == 400
+
+ def test_status_code_500(self):
+ """The status_code property is present with 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.status_code == 500
+
+ def test_is_server_error_200(self):
+ """Report not server error on 200 response."""
+ resp = requests.Response()
+ resp.status_code = 200
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_300(self):
+ """Report not server error on 300 response."""
+ resp = requests.Response()
+ resp.status_code = 300
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_400(self):
+ """Report not server error on 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.is_server_error() is False
+
+ def test_is_server_error_500(self):
+ """Report server error on 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.is_server_error() is True
+
+ def test_is_client_error_500(self):
+ """Report not client error on 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.is_client_error() is False
+
+ def test_is_client_error_400(self):
+ """Report client error on 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.is_client_error() is True
+
+ def test_is_error_300(self):
+ """Report no error on 300 response."""
+ resp = requests.Response()
+ resp.status_code = 300
+ err = APIError('', response=resp)
+ assert err.is_error() is False
+
+ def test_is_error_400(self):
+ """Report error on 400 response."""
+ resp = requests.Response()
+ resp.status_code = 400
+ err = APIError('', response=resp)
+ assert err.is_error() is True
+
+ def test_is_error_500(self):
+ """Report error on 500 response."""
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('', response=resp)
+ assert err.is_error() is True
+
+ def test_create_error_from_exception(self):
+ resp = requests.Response()
+ resp.status_code = 500
+ err = APIError('')
+ try:
+ resp.raise_for_status()
+ except requests.exceptions.HTTPError as e:
+ try:
+ create_api_error_from_http_exception(e)
+ except APIError as e:
+ err = e
+ assert err.is_server_error() is True
+
+
+class CreateUnexpectedKwargsErrorTest(unittest.TestCase):
+ def test_create_unexpected_kwargs_error_single(self):
+ e = create_unexpected_kwargs_error('f', {'foo': 'bar'})
+ assert str(e) == "f() got an unexpected keyword argument 'foo'"
+
+ def test_create_unexpected_kwargs_error_multiple(self):
+ e = create_unexpected_kwargs_error('f', {'foo': 'bar', 'baz': 'bosh'})
+ assert str(e) == "f() got unexpected keyword arguments 'baz', 'foo'"
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/transport/test_sshconn.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/transport/test_sshconn.py
new file mode 100644
index 00000000..e9189f3e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/transport/test_sshconn.py
@@ -0,0 +1,57 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+import sys
+
+import pytest
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from ansible_collections.community.docker.plugins.module_utils._api.transport.sshconn import SSHSocket, SSHHTTPAdapter
+
+
+class SSHAdapterTest(unittest.TestCase):
+ @staticmethod
+ def test_ssh_hostname_prefix_trim():
+ conn = SSHHTTPAdapter(
+ base_url="ssh://user@hostname:1234", shell_out=True)
+ assert conn.ssh_host == "user@hostname:1234"
+
+ @staticmethod
+ def test_ssh_parse_url():
+ c = SSHSocket(host="user@hostname:1234")
+ assert c.host == "hostname"
+ assert c.port == "1234"
+ assert c.user == "user"
+
+ @staticmethod
+ def test_ssh_parse_hostname_only():
+ c = SSHSocket(host="hostname")
+ assert c.host == "hostname"
+ assert c.port is None
+ assert c.user is None
+
+ @staticmethod
+ def test_ssh_parse_user_and_hostname():
+ c = SSHSocket(host="user@hostname")
+ assert c.host == "hostname"
+ assert c.port is None
+ assert c.user == "user"
+
+ @staticmethod
+ def test_ssh_parse_hostname_and_port():
+ c = SSHSocket(host="hostname:22")
+ assert c.host == "hostname"
+ assert c.port == "22"
+ assert c.user is None
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/transport/test_ssladapter.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/transport/test_ssladapter.py
new file mode 100644
index 00000000..428163e6
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/transport/test_ssladapter.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+import sys
+
+import pytest
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from ansible_collections.community.docker.plugins.module_utils._api.transport import ssladapter
+
+try:
+ from backports.ssl_match_hostname import (
+ match_hostname, CertificateError
+ )
+except ImportError:
+ from ssl import (
+ match_hostname, CertificateError
+ )
+
+try:
+ from ssl import OP_NO_SSLv3, OP_NO_SSLv2, OP_NO_TLSv1
+except ImportError:
+ OP_NO_SSLv2 = 0x1000000
+ OP_NO_SSLv3 = 0x2000000
+ OP_NO_TLSv1 = 0x4000000
+
+
+class SSLAdapterTest(unittest.TestCase):
+ def test_only_uses_tls(self):
+ ssl_context = ssladapter.urllib3.util.ssl_.create_urllib3_context()
+
+ assert ssl_context.options & OP_NO_SSLv3
+ # if OpenSSL is compiled without SSL2 support, OP_NO_SSLv2 will be 0
+ assert not bool(OP_NO_SSLv2) or ssl_context.options & OP_NO_SSLv2
+ assert not ssl_context.options & OP_NO_TLSv1
+
+
+class MatchHostnameTest(unittest.TestCase):
+ cert = {
+ 'issuer': (
+ (('countryName', 'US'),),
+ (('stateOrProvinceName', 'California'),),
+ (('localityName', 'San Francisco'),),
+ (('organizationName', 'Docker Inc'),),
+ (('organizationalUnitName', 'Docker-Python'),),
+ (('commonName', 'localhost'),),
+ (('emailAddress', 'info@docker.com'),)
+ ),
+ 'notAfter': 'Mar 25 23:08:23 2030 GMT',
+ 'notBefore': 'Mar 25 23:08:23 2016 GMT',
+ 'serialNumber': 'BD5F894C839C548F',
+ 'subject': (
+ (('countryName', 'US'),),
+ (('stateOrProvinceName', 'California'),),
+ (('localityName', 'San Francisco'),),
+ (('organizationName', 'Docker Inc'),),
+ (('organizationalUnitName', 'Docker-Python'),),
+ (('commonName', 'localhost'),),
+ (('emailAddress', 'info@docker.com'),)
+ ),
+ 'subjectAltName': (
+ ('DNS', 'localhost'),
+ ('DNS', '*.gensokyo.jp'),
+ ('IP Address', '127.0.0.1'),
+ ),
+ 'version': 3
+ }
+
+ def test_match_ip_address_success(self):
+ assert match_hostname(self.cert, '127.0.0.1') is None
+
+ def test_match_localhost_success(self):
+ assert match_hostname(self.cert, 'localhost') is None
+
+ def test_match_dns_success(self):
+ assert match_hostname(self.cert, 'touhou.gensokyo.jp') is None
+
+ def test_match_ip_address_failure(self):
+ with pytest.raises(CertificateError):
+ match_hostname(self.cert, '192.168.0.25')
+
+ def test_match_dns_failure(self):
+ with pytest.raises(CertificateError):
+ match_hostname(self.cert, 'foobar.co.uk')
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_build.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_build.py
new file mode 100644
index 00000000..50eb703d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_build.py
@@ -0,0 +1,515 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import os.path
+import shutil
+import socket
+import tarfile
+import tempfile
+import unittest
+import sys
+
+import pytest
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from ansible_collections.community.docker.plugins.module_utils._api.constants import IS_WINDOWS_PLATFORM
+from ansible_collections.community.docker.plugins.module_utils._api.utils.build import exclude_paths, tar
+
+
+def make_tree(dirs, files):
+ base = tempfile.mkdtemp()
+
+ for path in dirs:
+ os.makedirs(os.path.join(base, path))
+
+ for path in files:
+ with open(os.path.join(base, path), 'w') as f:
+ f.write("content")
+
+ return base
+
+
+def convert_paths(collection):
+ return set(map(convert_path, collection))
+
+
+def convert_path(path):
+ return path.replace('/', os.path.sep)
+
+
+class ExcludePathsTest(unittest.TestCase):
+ dirs = [
+ 'foo',
+ 'foo/bar',
+ 'bar',
+ 'target',
+ 'target/subdir',
+ 'subdir',
+ 'subdir/target',
+ 'subdir/target/subdir',
+ 'subdir/subdir2',
+ 'subdir/subdir2/target',
+ 'subdir/subdir2/target/subdir'
+ ]
+
+ files = [
+ 'Dockerfile',
+ 'Dockerfile.alt',
+ '.dockerignore',
+ 'a.py',
+ 'a.go',
+ 'b.py',
+ 'cde.py',
+ 'foo/a.py',
+ 'foo/b.py',
+ 'foo/bar/a.py',
+ 'bar/a.py',
+ 'foo/Dockerfile3',
+ 'target/file.txt',
+ 'target/subdir/file.txt',
+ 'subdir/file.txt',
+ 'subdir/target/file.txt',
+ 'subdir/target/subdir/file.txt',
+ 'subdir/subdir2/file.txt',
+ 'subdir/subdir2/target/file.txt',
+ 'subdir/subdir2/target/subdir/file.txt',
+ ]
+
+ all_paths = set(dirs + files)
+
+ def setUp(self):
+ self.base = make_tree(self.dirs, self.files)
+
+ def tearDown(self):
+ shutil.rmtree(self.base)
+
+ def exclude(self, patterns, dockerfile=None):
+ return set(exclude_paths(self.base, patterns, dockerfile=dockerfile))
+
+ def test_no_excludes(self):
+ assert self.exclude(['']) == convert_paths(self.all_paths)
+
+ def test_no_dupes(self):
+ paths = exclude_paths(self.base, ['!a.py'])
+ assert sorted(paths) == sorted(set(paths))
+
+ def test_wildcard_exclude(self):
+ assert self.exclude(['*']) == set(['Dockerfile', '.dockerignore'])
+
+ def test_exclude_dockerfile_dockerignore(self):
+ """
+ Even if the .dockerignore file explicitly says to exclude
+ Dockerfile and/or .dockerignore, don't exclude them from
+ the actual tar file.
+ """
+ assert self.exclude(['Dockerfile', '.dockerignore']) == convert_paths(
+ self.all_paths
+ )
+
+ def test_exclude_custom_dockerfile(self):
+ """
+ If we're using a custom Dockerfile, make sure that's not
+ excluded.
+ """
+ assert self.exclude(['*'], dockerfile='Dockerfile.alt') == set(['Dockerfile.alt', '.dockerignore'])
+
+ assert self.exclude(
+ ['*'], dockerfile='foo/Dockerfile3'
+ ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
+
+ # https://github.com/docker/docker-py/issues/1956
+ assert self.exclude(
+ ['*'], dockerfile='./foo/Dockerfile3'
+ ) == convert_paths(set(['foo/Dockerfile3', '.dockerignore']))
+
+ def test_exclude_dockerfile_child(self):
+ includes = self.exclude(['foo/'], dockerfile='foo/Dockerfile3')
+ assert convert_path('foo/Dockerfile3') in includes
+ assert convert_path('foo/a.py') not in includes
+
+ def test_single_filename(self):
+ assert self.exclude(['a.py']) == convert_paths(
+ self.all_paths - set(['a.py'])
+ )
+
+ def test_single_filename_leading_dot_slash(self):
+ assert self.exclude(['./a.py']) == convert_paths(
+ self.all_paths - set(['a.py'])
+ )
+
+ # As odd as it sounds, a filename pattern with a trailing slash on the
+ # end *will* result in that file being excluded.
+ def test_single_filename_trailing_slash(self):
+ assert self.exclude(['a.py/']) == convert_paths(
+ self.all_paths - set(['a.py'])
+ )
+
+ def test_wildcard_filename_start(self):
+ assert self.exclude(['*.py']) == convert_paths(
+ self.all_paths - set(['a.py', 'b.py', 'cde.py'])
+ )
+
+ def test_wildcard_with_exception(self):
+ assert self.exclude(['*.py', '!b.py']) == convert_paths(
+ self.all_paths - set(['a.py', 'cde.py'])
+ )
+
+ def test_wildcard_with_wildcard_exception(self):
+ assert self.exclude(['*.*', '!*.go']) == convert_paths(
+ self.all_paths - set([
+ 'a.py', 'b.py', 'cde.py', 'Dockerfile.alt',
+ ])
+ )
+
+ def test_wildcard_filename_end(self):
+ assert self.exclude(['a.*']) == convert_paths(
+ self.all_paths - set(['a.py', 'a.go'])
+ )
+
+ def test_question_mark(self):
+ assert self.exclude(['?.py']) == convert_paths(
+ self.all_paths - set(['a.py', 'b.py'])
+ )
+
+ def test_single_subdir_single_filename(self):
+ assert self.exclude(['foo/a.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py'])
+ )
+
+ def test_single_subdir_single_filename_leading_slash(self):
+ assert self.exclude(['/foo/a.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py'])
+ )
+
+ def test_exclude_include_absolute_path(self):
+ base = make_tree([], ['a.py', 'b.py'])
+ assert exclude_paths(
+ base,
+ ['/*', '!/*.py']
+ ) == set(['a.py', 'b.py'])
+
+ def test_single_subdir_with_path_traversal(self):
+ assert self.exclude(['foo/whoops/../a.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py'])
+ )
+
+ def test_single_subdir_wildcard_filename(self):
+ assert self.exclude(['foo/*.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py', 'foo/b.py'])
+ )
+
+ def test_wildcard_subdir_single_filename(self):
+ assert self.exclude(['*/a.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py', 'bar/a.py'])
+ )
+
+ def test_wildcard_subdir_wildcard_filename(self):
+ assert self.exclude(['*/*.py']) == convert_paths(
+ self.all_paths - set(['foo/a.py', 'foo/b.py', 'bar/a.py'])
+ )
+
+ def test_directory(self):
+ assert self.exclude(['foo']) == convert_paths(
+ self.all_paths - set([
+ 'foo', 'foo/a.py', 'foo/b.py', 'foo/bar', 'foo/bar/a.py',
+ 'foo/Dockerfile3'
+ ])
+ )
+
+    def test_directory_with_trailing_slash(self):
+        # Pattern must actually carry the trailing slash ('foo/'), otherwise
+        # this test is an exact duplicate of test_directory above.
+        assert self.exclude(['foo/']) == convert_paths(
+            self.all_paths - set([
+                'foo', 'foo/a.py', 'foo/b.py',
+                'foo/bar', 'foo/bar/a.py', 'foo/Dockerfile3'
+            ])
+        )
+
+ def test_directory_with_single_exception(self):
+ assert self.exclude(['foo', '!foo/bar/a.py']) == convert_paths(
+ self.all_paths - set([
+ 'foo/a.py', 'foo/b.py', 'foo', 'foo/bar',
+ 'foo/Dockerfile3'
+ ])
+ )
+
+ def test_directory_with_subdir_exception(self):
+ assert self.exclude(['foo', '!foo/bar']) == convert_paths(
+ self.all_paths - set([
+ 'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
+ ])
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows'
+ )
+ def test_directory_with_subdir_exception_win32_pathsep(self):
+ assert self.exclude(['foo', '!foo\\bar']) == convert_paths(
+ self.all_paths - set([
+ 'foo/a.py', 'foo/b.py', 'foo', 'foo/Dockerfile3'
+ ])
+ )
+
+ def test_directory_with_wildcard_exception(self):
+ assert self.exclude(['foo', '!foo/*.py']) == convert_paths(
+ self.all_paths - set([
+ 'foo/bar', 'foo/bar/a.py', 'foo', 'foo/Dockerfile3'
+ ])
+ )
+
+ def test_subdirectory(self):
+ assert self.exclude(['foo/bar']) == convert_paths(
+ self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ )
+
+ @pytest.mark.skipif(
+ not IS_WINDOWS_PLATFORM, reason='Backslash patterns only on Windows'
+ )
+ def test_subdirectory_win32_pathsep(self):
+ assert self.exclude(['foo\\bar']) == convert_paths(
+ self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ )
+
+ def test_double_wildcard(self):
+ assert self.exclude(['**/a.py']) == convert_paths(
+ self.all_paths - set([
+ 'a.py', 'foo/a.py', 'foo/bar/a.py', 'bar/a.py'
+ ])
+ )
+
+ assert self.exclude(['foo/**/bar']) == convert_paths(
+ self.all_paths - set(['foo/bar', 'foo/bar/a.py'])
+ )
+
+ def test_single_and_double_wildcard(self):
+ assert self.exclude(['**/target/*/*']) == convert_paths(
+ self.all_paths - set([
+ 'target/subdir/file.txt',
+ 'subdir/target/subdir/file.txt',
+ 'subdir/subdir2/target/subdir/file.txt',
+ ])
+ )
+
+ def test_trailing_double_wildcard(self):
+ assert self.exclude(['subdir/**']) == convert_paths(
+ self.all_paths - set([
+ 'subdir/file.txt',
+ 'subdir/target/file.txt',
+ 'subdir/target/subdir/file.txt',
+ 'subdir/subdir2/file.txt',
+ 'subdir/subdir2/target/file.txt',
+ 'subdir/subdir2/target/subdir/file.txt',
+ 'subdir/target',
+ 'subdir/target/subdir',
+ 'subdir/subdir2',
+ 'subdir/subdir2/target',
+ 'subdir/subdir2/target/subdir',
+ ])
+ )
+
+ def test_double_wildcard_with_exception(self):
+ assert self.exclude(['**', '!bar', '!foo/bar']) == convert_paths(
+ set([
+ 'foo/bar', 'foo/bar/a.py', 'bar', 'bar/a.py', 'Dockerfile',
+ '.dockerignore',
+ ])
+ )
+
+ def test_include_wildcard(self):
+ # This may be surprising but it matches the CLI's behavior
+ # (tested with 18.05.0-ce on linux)
+ base = make_tree(['a'], ['a/b.py'])
+ assert exclude_paths(
+ base,
+ ['*', '!*/b.py']
+ ) == set()
+
+ def test_last_line_precedence(self):
+ base = make_tree(
+ [],
+ ['garbage.md',
+ 'trash.md',
+ 'README.md',
+ 'README-bis.md',
+ 'README-secret.md'])
+ assert exclude_paths(
+ base,
+ ['*.md', '!README*.md', 'README-secret.md']
+ ) == set(['README.md', 'README-bis.md'])
+
+ def test_parent_directory(self):
+ base = make_tree(
+ [],
+ ['a.py',
+ 'b.py',
+ 'c.py'])
+ # Dockerignore reference stipulates that absolute paths are
+ # equivalent to relative paths, hence /../foo should be
+ # equivalent to ../foo. It also stipulates that paths are run
+ # through Go's filepath.Clean, which explicitly "replace
+ # "/.." by "/" at the beginning of a path".
+ assert exclude_paths(
+ base,
+ ['../a.py', '/../b.py']
+ ) == set(['c.py'])
+
+
+class TarTest(unittest.TestCase):
+ def test_tar_with_excludes(self):
+ dirs = [
+ 'foo',
+ 'foo/bar',
+ 'bar',
+ ]
+
+ files = [
+ 'Dockerfile',
+ 'Dockerfile.alt',
+ '.dockerignore',
+ 'a.py',
+ 'a.go',
+ 'b.py',
+ 'cde.py',
+ 'foo/a.py',
+ 'foo/b.py',
+ 'foo/bar/a.py',
+ 'bar/a.py',
+ ]
+
+ exclude = [
+ '*.py',
+ '!b.py',
+ '!a.go',
+ 'foo',
+ 'Dockerfile*',
+ '.dockerignore',
+ ]
+
+ expected_names = set([
+ 'Dockerfile',
+ '.dockerignore',
+ 'a.go',
+ 'b.py',
+ 'bar',
+ 'bar/a.py',
+ ])
+
+ base = make_tree(dirs, files)
+ self.addCleanup(shutil.rmtree, base)
+
+ with tar(base, exclude=exclude) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == sorted(expected_names)
+
+ def test_tar_with_empty_directory(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'foo']
+
+ @pytest.mark.skipif(
+ IS_WINDOWS_PLATFORM or os.geteuid() == 0,
+ reason='root user always has access ; no chmod on Windows'
+ )
+ def test_tar_with_inaccessible_file(self):
+ base = tempfile.mkdtemp()
+ full_path = os.path.join(base, 'foo')
+ self.addCleanup(shutil.rmtree, base)
+ with open(full_path, 'w') as f:
+ f.write('content')
+ os.chmod(full_path, 0o222)
+ with pytest.raises(IOError) as ei:
+ tar(base)
+
+ assert 'Can not read file in context: {full_path}'.format(full_path=full_path) in (
+ ei.exconly()
+ )
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_with_file_symlinks(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ with open(os.path.join(base, 'foo'), 'w') as f:
+ f.write("content")
+ os.makedirs(os.path.join(base, 'bar'))
+ os.symlink('../foo', os.path.join(base, 'bar/foo'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_with_directory_symlinks(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+ os.symlink('../foo', os.path.join(base, 'bar/foo'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_with_broken_symlinks(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+
+ os.symlink('../baz', os.path.join(base, 'bar/foo'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'bar/foo', 'foo']
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No UNIX sockets on Win32')
+ def test_tar_socket_file(self):
+ base = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, base)
+ for d in ['foo', 'bar']:
+ os.makedirs(os.path.join(base, d))
+ sock = socket.socket(socket.AF_UNIX)
+ self.addCleanup(sock.close)
+ sock.bind(os.path.join(base, 'test.sock'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ assert sorted(tar_data.getnames()) == ['bar', 'foo']
+
+    def test_tar_negative_mtime_bug(self):  # was 'tar_test_...': never matched test discovery, so it never ran
+        base = tempfile.mkdtemp()
+        filename = os.path.join(base, 'th.txt')
+        self.addCleanup(shutil.rmtree, base)
+        with open(filename, 'w') as f:
+            f.write('Invisible Full Moon')
+        os.utime(filename, (12345, -3600.0))
+        with tar(base) as archive:
+            tar_data = tarfile.open(fileobj=archive)
+            assert tar_data.getnames() == ['th.txt']
+            assert tar_data.getmember('th.txt').mtime == -3600
+
+ @pytest.mark.skipif(IS_WINDOWS_PLATFORM, reason='No symlinks on Windows')
+ def test_tar_directory_link(self):
+ dirs = ['a', 'b', 'a/c']
+ files = ['a/hello.py', 'b/utils.py', 'a/c/descend.py']
+ base = make_tree(dirs, files)
+ self.addCleanup(shutil.rmtree, base)
+ os.symlink(os.path.join(base, 'b'), os.path.join(base, 'a/c/b'))
+ with tar(base) as archive:
+ tar_data = tarfile.open(fileobj=archive)
+ names = tar_data.getnames()
+ for member in dirs + files:
+ assert member in names
+ assert 'a/c/b' in names
+ assert 'a/c/b/utils.py' not in names
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_config.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_config.py
new file mode 100644
index 00000000..9448f384
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_config.py
@@ -0,0 +1,141 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import unittest
+import shutil
+import tempfile
+import json
+import sys
+
+import pytest
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from pytest import mark, fixture
+
+from ansible_collections.community.docker.plugins.module_utils._api.utils import config
+
+try:
+ from unittest import mock
+except ImportError:
+ import mock
+
+
+class FindConfigFileTest(unittest.TestCase):
+
+ @fixture(autouse=True)
+ def tmpdir(self, tmpdir):
+ self.mkdir = tmpdir.mkdir
+
+ def test_find_config_fallback(self):
+ tmpdir = self.mkdir('test_find_config_fallback')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() is None
+
+ def test_find_config_from_explicit_path(self):
+ tmpdir = self.mkdir('test_find_config_from_explicit_path')
+ config_path = tmpdir.ensure('my-config-file.json')
+
+ assert config.find_config_file(str(config_path)) == str(config_path)
+
+ def test_find_config_from_environment(self):
+ tmpdir = self.mkdir('test_find_config_from_environment')
+ config_path = tmpdir.ensure('config.json')
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform == 'win32'")
+ def test_find_config_from_home_posix(self):
+ tmpdir = self.mkdir('test_find_config_from_home_posix')
+ config_path = tmpdir.ensure('.docker', 'config.json')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform == 'win32'")
+ def test_find_config_from_home_legacy_name(self):
+ tmpdir = self.mkdir('test_find_config_from_home_legacy_name')
+ config_path = tmpdir.ensure('.dockercfg')
+
+ with mock.patch.dict(os.environ, {'HOME': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+ @mark.skipif("sys.platform != 'win32'")
+ def test_find_config_from_home_windows(self):
+ tmpdir = self.mkdir('test_find_config_from_home_windows')
+ config_path = tmpdir.ensure('.docker', 'config.json')
+
+ with mock.patch.dict(os.environ, {'USERPROFILE': str(tmpdir)}):
+ assert config.find_config_file() == str(config_path)
+
+
+class LoadConfigTest(unittest.TestCase):
+ def test_load_config_no_file(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ cfg = config.load_general_config(folder)
+ assert cfg is not None
+ assert isinstance(cfg, dict)
+ assert not cfg
+
+ def test_load_config_custom_headers(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'HttpHeaders': {
+ 'Name': 'Spike',
+ 'Surname': 'Spiegel'
+ },
+ }
+
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ cfg = config.load_general_config(dockercfg_path)
+ assert 'HttpHeaders' in cfg
+ assert cfg['HttpHeaders'] == {
+ 'Name': 'Spike',
+ 'Surname': 'Spiegel'
+ }
+
+ def test_load_config_detach_keys(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ cfg = config.load_general_config(dockercfg_path)
+ assert cfg == config_data
+
+ def test_load_config_from_env(self):
+ folder = tempfile.mkdtemp()
+ self.addCleanup(shutil.rmtree, folder)
+ dockercfg_path = os.path.join(folder, 'config.json')
+ config_data = {
+ 'detachKeys': 'ctrl-q, ctrl-u, ctrl-i'
+ }
+ with open(dockercfg_path, 'w') as f:
+ json.dump(config_data, f)
+
+ with mock.patch.dict(os.environ, {'DOCKER_CONFIG': folder}):
+ cfg = config.load_general_config(None)
+ assert cfg == config_data
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_decorators.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_decorators.py
new file mode 100644
index 00000000..8ba1ec5f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_decorators.py
@@ -0,0 +1,54 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+import sys
+
+import pytest
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from ansible_collections.community.docker.plugins.module_utils._api.api.client import APIClient
+from ansible_collections.community.docker.plugins.module_utils._api.constants import DEFAULT_DOCKER_API_VERSION
+from ansible_collections.community.docker.plugins.module_utils._api.utils.decorators import update_headers
+
+
+class DecoratorsTest(unittest.TestCase):
+ def test_update_headers(self):
+ sample_headers = {
+ 'X-Docker-Locale': 'en-US',
+ }
+
+ def f(self, headers=None):
+ return headers
+
+ client = APIClient(version=DEFAULT_DOCKER_API_VERSION)
+ client._general_configs = {}
+
+ g = update_headers(f)
+ assert g(client, headers=None) is None
+ assert g(client, headers={}) == {}
+ assert g(client, headers={'Content-type': 'application/json'}) == {
+ 'Content-type': 'application/json',
+ }
+
+ client._general_configs = {
+ 'HttpHeaders': sample_headers
+ }
+
+ assert g(client, headers=None) == sample_headers
+ assert g(client, headers={}) == sample_headers
+ assert g(client, headers={'Content-type': 'application/json'}) == {
+ 'Content-type': 'application/json',
+ 'X-Docker-Locale': 'en-US',
+ }
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_json_stream.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_json_stream.py
new file mode 100644
index 00000000..2d7f300f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_json_stream.py
@@ -0,0 +1,77 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+import pytest
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from ansible_collections.community.docker.plugins.module_utils._api.utils.json_stream import json_splitter, stream_as_text, json_stream
+
+
+class TestJsonSplitter:
+
+ def test_json_splitter_no_object(self):
+ data = '{"foo": "bar'
+ assert json_splitter(data) is None
+
+ def test_json_splitter_with_object(self):
+ data = '{"foo": "bar"}\n \n{"next": "obj"}'
+ assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+ def test_json_splitter_leading_whitespace(self):
+ data = '\n \r{"foo": "bar"}\n\n {"next": "obj"}'
+ assert json_splitter(data) == ({'foo': 'bar'}, '{"next": "obj"}')
+
+
+class TestStreamAsText:
+
+ def test_stream_with_non_utf_unicode_character(self):
+ stream = [b'\xed\xf3\xf3']
+ output, = stream_as_text(stream)
+ assert output == u'���'
+
+ def test_stream_with_utf_character(self):
+ stream = [u'ěĝ'.encode('utf-8')]
+ output, = stream_as_text(stream)
+ assert output == u'ěĝ'
+
+
+class TestJsonStream:
+
+ def test_with_falsy_entries(self):
+ stream = [
+ '{"one": "two"}\n{}\n',
+ "[1, 2, 3]\n[]\n",
+ ]
+ output = list(json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {},
+ [1, 2, 3],
+ [],
+ ]
+
+ def test_with_leading_whitespace(self):
+ stream = [
+ '\n \r\n {"one": "two"}{"x": 1}',
+ ' {"three": "four"}\t\t{"x": 2}'
+ ]
+ output = list(json_stream(stream))
+ assert output == [
+ {'one': 'two'},
+ {'x': 1},
+ {'three': 'four'},
+ {'x': 2}
+ ]
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_ports.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_ports.py
new file mode 100644
index 00000000..c1a08a12
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_ports.py
@@ -0,0 +1,162 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+import sys
+
+import pytest
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from ansible_collections.community.docker.plugins.module_utils._api.utils.ports import build_port_bindings, split_port
+
+
+class PortsTest(unittest.TestCase):
+ def test_split_port_with_host_ip(self):
+ internal_port, external_port = split_port("127.0.0.1:1000:2000")
+ assert internal_port == ["2000"]
+ assert external_port == [("127.0.0.1", "1000")]
+
+ def test_split_port_with_protocol(self):
+ for protocol in ['tcp', 'udp', 'sctp']:
+ internal_port, external_port = split_port(
+ "127.0.0.1:1000:2000/" + protocol
+ )
+ assert internal_port == ["2000/" + protocol]
+ assert external_port == [("127.0.0.1", "1000")]
+
+ def test_split_port_with_host_ip_no_port(self):
+ internal_port, external_port = split_port("127.0.0.1::2000")
+ assert internal_port == ["2000"]
+ assert external_port == [("127.0.0.1", None)]
+
+ def test_split_port_range_with_host_ip_no_port(self):
+ internal_port, external_port = split_port("127.0.0.1::2000-2001")
+ assert internal_port == ["2000", "2001"]
+ assert external_port == [("127.0.0.1", None), ("127.0.0.1", None)]
+
+ def test_split_port_with_host_port(self):
+ internal_port, external_port = split_port("1000:2000")
+ assert internal_port == ["2000"]
+ assert external_port == ["1000"]
+
+ def test_split_port_range_with_host_port(self):
+ internal_port, external_port = split_port("1000-1001:2000-2001")
+ assert internal_port == ["2000", "2001"]
+ assert external_port == ["1000", "1001"]
+
+ def test_split_port_random_port_range_with_host_port(self):
+ internal_port, external_port = split_port("1000-1001:2000")
+ assert internal_port == ["2000"]
+ assert external_port == ["1000-1001"]
+
+ def test_split_port_no_host_port(self):
+ internal_port, external_port = split_port("2000")
+ assert internal_port == ["2000"]
+ assert external_port is None
+
+ def test_split_port_range_no_host_port(self):
+ internal_port, external_port = split_port("2000-2001")
+ assert internal_port == ["2000", "2001"]
+ assert external_port is None
+
+ def test_split_port_range_with_protocol(self):
+ internal_port, external_port = split_port(
+ "127.0.0.1:1000-1001:2000-2001/udp")
+ assert internal_port == ["2000/udp", "2001/udp"]
+ assert external_port == [("127.0.0.1", "1000"), ("127.0.0.1", "1001")]
+
+ def test_split_port_with_ipv6_address(self):
+ internal_port, external_port = split_port(
+ "2001:abcd:ef00::2:1000:2000")
+ assert internal_port == ["2000"]
+ assert external_port == [("2001:abcd:ef00::2", "1000")]
+
+ def test_split_port_with_ipv6_square_brackets_address(self):
+ internal_port, external_port = split_port(
+ "[2001:abcd:ef00::2]:1000:2000")
+ assert internal_port == ["2000"]
+ assert external_port == [("2001:abcd:ef00::2", "1000")]
+
+ def test_split_port_invalid(self):
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000:2000:tcp")
+
+ def test_split_port_invalid_protocol(self):
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000:2000/ftp")
+
+ def test_non_matching_length_port_ranges(self):
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000-1010:2000-2002/tcp")
+
+ def test_port_and_range_invalid(self):
+ with pytest.raises(ValueError):
+ split_port("0.0.0.0:1000:2000-2002/tcp")
+
+ def test_port_only_with_colon(self):
+ with pytest.raises(ValueError):
+ split_port(":80")
+
+ def test_host_only_with_colon(self):
+ with pytest.raises(ValueError):
+ split_port("localhost:")
+
+ def test_with_no_container_port(self):
+ with pytest.raises(ValueError):
+ split_port("localhost:80:")
+
+ def test_split_port_empty_string(self):
+ with pytest.raises(ValueError):
+ split_port("")
+
+ def test_split_port_non_string(self):
+ assert split_port(1243) == (['1243'], None)
+
+ def test_build_port_bindings_with_one_port(self):
+ port_bindings = build_port_bindings(["127.0.0.1:1000:1000"])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+
+ def test_build_port_bindings_with_matching_internal_ports(self):
+ port_bindings = build_port_bindings(
+ ["127.0.0.1:1000:1000", "127.0.0.1:2000:1000"])
+ assert port_bindings["1000"] == [
+ ("127.0.0.1", "1000"), ("127.0.0.1", "2000")
+ ]
+
+ def test_build_port_bindings_with_nonmatching_internal_ports(self):
+ port_bindings = build_port_bindings(
+ ["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["2000"] == [("127.0.0.1", "2000")]
+
+ def test_build_port_bindings_with_port_range(self):
+ port_bindings = build_port_bindings(["127.0.0.1:1000-1001:1000-1001"])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["1001"] == [("127.0.0.1", "1001")]
+
+ def test_build_port_bindings_with_matching_internal_port_ranges(self):
+ port_bindings = build_port_bindings(
+ ["127.0.0.1:1000-1001:1000-1001", "127.0.0.1:2000-2001:1000-1001"])
+ assert port_bindings["1000"] == [
+ ("127.0.0.1", "1000"), ("127.0.0.1", "2000")
+ ]
+ assert port_bindings["1001"] == [
+ ("127.0.0.1", "1001"), ("127.0.0.1", "2001")
+ ]
+
+ def test_build_port_bindings_with_nonmatching_internal_port_ranges(self):
+ port_bindings = build_port_bindings(
+ ["127.0.0.1:1000:1000", "127.0.0.1:2000:2000"])
+ assert port_bindings["1000"] == [("127.0.0.1", "1000")]
+ assert port_bindings["2000"] == [("127.0.0.1", "2000")]
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_proxy.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_proxy.py
new file mode 100644
index 00000000..0eb24270
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_proxy.py
@@ -0,0 +1,100 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import unittest
+import sys
+
+import pytest
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from ansible_collections.community.docker.plugins.module_utils._api.utils.proxy import ProxyConfig
+
+
+HTTP = 'http://test:80'
+HTTPS = 'https://test:443'
+FTP = 'ftp://user:password@host:23'
+NO_PROXY = 'localhost,.localdomain'
+CONFIG = ProxyConfig(http=HTTP, https=HTTPS, ftp=FTP, no_proxy=NO_PROXY)
+ENV = {
+ 'http_proxy': HTTP,
+ 'HTTP_PROXY': HTTP,
+ 'https_proxy': HTTPS,
+ 'HTTPS_PROXY': HTTPS,
+ 'ftp_proxy': FTP,
+ 'FTP_PROXY': FTP,
+ 'no_proxy': NO_PROXY,
+ 'NO_PROXY': NO_PROXY,
+}
+
+
+class ProxyConfigTest(unittest.TestCase):
+
+ def test_from_dict(self):
+ config = ProxyConfig.from_dict({
+ 'httpProxy': HTTP,
+ 'httpsProxy': HTTPS,
+ 'ftpProxy': FTP,
+ 'noProxy': NO_PROXY
+ })
+ self.assertEqual(CONFIG.http, config.http)
+ self.assertEqual(CONFIG.https, config.https)
+ self.assertEqual(CONFIG.ftp, config.ftp)
+ self.assertEqual(CONFIG.no_proxy, config.no_proxy)
+
+ def test_new(self):
+ config = ProxyConfig()
+ self.assertIsNone(config.http)
+ self.assertIsNone(config.https)
+ self.assertIsNone(config.ftp)
+ self.assertIsNone(config.no_proxy)
+
+ config = ProxyConfig(http='a', https='b', ftp='c', no_proxy='d')
+ self.assertEqual(config.http, 'a')
+ self.assertEqual(config.https, 'b')
+ self.assertEqual(config.ftp, 'c')
+ self.assertEqual(config.no_proxy, 'd')
+
+ def test_truthiness(self):
+ assert not ProxyConfig()
+ assert ProxyConfig(http='non-zero')
+ assert ProxyConfig(https='non-zero')
+ assert ProxyConfig(ftp='non-zero')
+ assert ProxyConfig(no_proxy='non-zero')
+
+ def test_environment(self):
+ self.assertDictEqual(CONFIG.get_environment(), ENV)
+ empty = ProxyConfig()
+ self.assertDictEqual(empty.get_environment(), {})
+
+ def test_inject_proxy_environment(self):
+ # Proxy config is non null, env is None.
+ self.assertSetEqual(
+ set(CONFIG.inject_proxy_environment(None)),
+ set('{k}={v}'.format(k=k, v=v) for k, v in ENV.items()))
+
+ # Proxy config is null, env is None.
+ self.assertIsNone(ProxyConfig().inject_proxy_environment(None), None)
+
+ env = ['FOO=BAR', 'BAR=BAZ']
+
+ # Proxy config is non null, env is non null
+ actual = CONFIG.inject_proxy_environment(env)
+ expected = ['{k}={v}'.format(k=k, v=v) for k, v in ENV.items()] + env
+ # It's important that the first 8 variables are the ones from the proxy
+ # config, and the last 2 are the ones from the input environment
+ self.assertSetEqual(set(actual[:8]), set(expected[:8]))
+ self.assertSetEqual(set(actual[-2:]), set(expected[-2:]))
+
+            # Proxy config is null, env is non null
+ self.assertListEqual(ProxyConfig().inject_proxy_environment(env), env)
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_utils.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_utils.py
new file mode 100644
index 00000000..cc0dc695
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/test_utils.py
@@ -0,0 +1,488 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import json
+import os
+import os.path
+import shutil
+import tempfile
+import unittest
+import sys
+
+from ansible.module_utils.six import PY3
+
+import pytest
+
+if sys.version_info < (2, 7):
+ pytestmark = pytest.mark.skip('Python 2.6 is not supported')
+
+from ansible_collections.community.docker.plugins.module_utils._api.api.client import APIClient
+from ansible_collections.community.docker.plugins.module_utils._api.constants import IS_WINDOWS_PLATFORM, DEFAULT_DOCKER_API_VERSION
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
+ convert_filters, convert_volume_binds,
+ decode_json_header, kwargs_from_env, parse_bytes,
+ parse_devices, parse_env_file, parse_host,
+ parse_repository_tag, split_command, format_environment,
+)
+
+
+TEST_CERT_DIR = os.path.join(
+ os.path.dirname(__file__),
+ 'testdata/certs',
+)
+
+
+class KwargsFromEnvTest(unittest.TestCase):
+ def setUp(self):
+ self.os_environ = os.environ.copy()
+
+ def tearDown(self):
+ os.environ = self.os_environ
+
+ def test_kwargs_from_env_empty(self):
+ os.environ.update(DOCKER_HOST='',
+ DOCKER_CERT_PATH='')
+ os.environ.pop('DOCKER_TLS_VERIFY', None)
+
+ kwargs = kwargs_from_env()
+ assert kwargs.get('base_url') is None
+ assert kwargs.get('tls') is None
+
+ def test_kwargs_from_env_tls(self):
+ os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
+ DOCKER_CERT_PATH=TEST_CERT_DIR,
+ DOCKER_TLS_VERIFY='1')
+ kwargs = kwargs_from_env(assert_hostname=False)
+ assert 'tcp://192.168.59.103:2376' == kwargs['base_url']
+ assert 'ca.pem' in kwargs['tls'].ca_cert
+ assert 'cert.pem' in kwargs['tls'].cert[0]
+ assert 'key.pem' in kwargs['tls'].cert[1]
+ assert kwargs['tls'].assert_hostname is False
+ assert kwargs['tls'].verify
+
+ parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True)
+ kwargs['version'] = DEFAULT_DOCKER_API_VERSION
+ try:
+ client = APIClient(**kwargs)
+ assert parsed_host == client.base_url
+ assert kwargs['tls'].ca_cert == client.verify
+ assert kwargs['tls'].cert == client.cert
+ except TypeError as e:
+ self.fail(e)
+
+ def test_kwargs_from_env_tls_verify_false(self):
+ os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
+ DOCKER_CERT_PATH=TEST_CERT_DIR,
+ DOCKER_TLS_VERIFY='')
+ kwargs = kwargs_from_env(assert_hostname=True)
+ assert 'tcp://192.168.59.103:2376' == kwargs['base_url']
+ assert 'ca.pem' in kwargs['tls'].ca_cert
+ assert 'cert.pem' in kwargs['tls'].cert[0]
+ assert 'key.pem' in kwargs['tls'].cert[1]
+ assert kwargs['tls'].assert_hostname is True
+ assert kwargs['tls'].verify is False
+ parsed_host = parse_host(kwargs['base_url'], IS_WINDOWS_PLATFORM, True)
+ kwargs['version'] = DEFAULT_DOCKER_API_VERSION
+ try:
+ client = APIClient(**kwargs)
+ assert parsed_host == client.base_url
+ assert kwargs['tls'].cert == client.cert
+ assert not kwargs['tls'].verify
+ except TypeError as e:
+ self.fail(e)
+
+ def test_kwargs_from_env_tls_verify_false_no_cert(self):
+ temp_dir = tempfile.mkdtemp()
+ cert_dir = os.path.join(temp_dir, '.docker')
+ shutil.copytree(TEST_CERT_DIR, cert_dir)
+
+ os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
+ HOME=temp_dir,
+ DOCKER_TLS_VERIFY='')
+ os.environ.pop('DOCKER_CERT_PATH', None)
+ kwargs = kwargs_from_env(assert_hostname=True)
+ assert 'tcp://192.168.59.103:2376' == kwargs['base_url']
+
+ def test_kwargs_from_env_no_cert_path(self):
+ try:
+ temp_dir = tempfile.mkdtemp()
+ cert_dir = os.path.join(temp_dir, '.docker')
+ shutil.copytree(TEST_CERT_DIR, cert_dir)
+
+ os.environ.update(HOME=temp_dir,
+ DOCKER_CERT_PATH='',
+ DOCKER_TLS_VERIFY='1')
+
+ kwargs = kwargs_from_env()
+ assert kwargs['tls'].verify
+ assert cert_dir in kwargs['tls'].ca_cert
+ assert cert_dir in kwargs['tls'].cert[0]
+ assert cert_dir in kwargs['tls'].cert[1]
+ finally:
+ if temp_dir:
+ shutil.rmtree(temp_dir)
+
+ def test_kwargs_from_env_alternate_env(self):
+ # Values in os.environ are entirely ignored if an alternate is
+ # provided
+ os.environ.update(
+ DOCKER_HOST='tcp://192.168.59.103:2376',
+ DOCKER_CERT_PATH=TEST_CERT_DIR,
+ DOCKER_TLS_VERIFY=''
+ )
+ kwargs = kwargs_from_env(environment={
+ 'DOCKER_HOST': 'http://docker.gensokyo.jp:2581',
+ })
+ assert 'http://docker.gensokyo.jp:2581' == kwargs['base_url']
+ assert 'tls' not in kwargs
+
+
+class ConverVolumeBindsTest(unittest.TestCase):
+ def test_convert_volume_binds_empty(self):
+ assert convert_volume_binds({}) == []
+ assert convert_volume_binds([]) == []
+
+ def test_convert_volume_binds_list(self):
+ data = ['/a:/a:ro', '/b:/c:z']
+ assert convert_volume_binds(data) == data
+
+ def test_convert_volume_binds_complete(self):
+ data = {
+ '/mnt/vol1': {
+ 'bind': '/data',
+ 'mode': 'ro'
+ }
+ }
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:ro']
+
+ def test_convert_volume_binds_compact(self):
+ data = {
+ '/mnt/vol1': '/data'
+ }
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw']
+
+ def test_convert_volume_binds_no_mode(self):
+ data = {
+ '/mnt/vol1': {
+ 'bind': '/data'
+ }
+ }
+ assert convert_volume_binds(data) == ['/mnt/vol1:/data:rw']
+
+ def test_convert_volume_binds_unicode_bytes_input(self):
+ expected = [u'/mnt/지연:/unicode/박:rw']
+
+ data = {
+ u'/mnt/지연'.encode('utf-8'): {
+ 'bind': u'/unicode/박'.encode('utf-8'),
+ 'mode': u'rw'
+ }
+ }
+ assert convert_volume_binds(data) == expected
+
+ def test_convert_volume_binds_unicode_unicode_input(self):
+ expected = [u'/mnt/지연:/unicode/박:rw']
+
+ data = {
+ u'/mnt/지연': {
+ 'bind': u'/unicode/박',
+ 'mode': u'rw'
+ }
+ }
+ assert convert_volume_binds(data) == expected
+
+
+class ParseEnvFileTest(unittest.TestCase):
+ def generate_tempfile(self, file_content=None):
+ """
+ Generates a temporary file for tests with the content
+ of 'file_content' and returns the filename.
+ Don't forget to unlink the file with os.unlink() after.
+ """
+ local_tempfile = tempfile.NamedTemporaryFile(delete=False)
+ local_tempfile.write(file_content.encode('UTF-8'))
+ local_tempfile.close()
+ return local_tempfile.name
+
+ def test_parse_env_file_proper(self):
+ env_file = self.generate_tempfile(
+ file_content='USER=jdoe\nPASS=secret')
+ get_parse_env_file = parse_env_file(env_file)
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'}
+ os.unlink(env_file)
+
+ def test_parse_env_file_with_equals_character(self):
+ env_file = self.generate_tempfile(
+ file_content='USER=jdoe\nPASS=sec==ret')
+ get_parse_env_file = parse_env_file(env_file)
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'sec==ret'}
+ os.unlink(env_file)
+
+ def test_parse_env_file_commented_line(self):
+ env_file = self.generate_tempfile(
+ file_content='USER=jdoe\n#PASS=secret')
+ get_parse_env_file = parse_env_file(env_file)
+ assert get_parse_env_file == {'USER': 'jdoe'}
+ os.unlink(env_file)
+
+ def test_parse_env_file_newline(self):
+ env_file = self.generate_tempfile(
+ file_content='\nUSER=jdoe\n\n\nPASS=secret')
+ get_parse_env_file = parse_env_file(env_file)
+ assert get_parse_env_file == {'USER': 'jdoe', 'PASS': 'secret'}
+ os.unlink(env_file)
+
+ def test_parse_env_file_invalid_line(self):
+ env_file = self.generate_tempfile(
+ file_content='USER jdoe')
+ with pytest.raises(DockerException):
+ parse_env_file(env_file)
+ os.unlink(env_file)
+
+
+class ParseHostTest(unittest.TestCase):
+ def test_parse_host(self):
+ invalid_hosts = [
+ '0.0.0.0',
+ 'tcp://',
+ 'udp://127.0.0.1',
+ 'udp://127.0.0.1:2375',
+ 'ssh://:22/path',
+ 'tcp://netloc:3333/path?q=1',
+ 'unix:///sock/path#fragment',
+ 'https://netloc:3333/path;params',
+ 'ssh://:clearpassword@host:22',
+ ]
+
+ valid_hosts = {
+ '0.0.0.1:5555': 'http://0.0.0.1:5555',
+ ':6666': 'http://127.0.0.1:6666',
+ 'tcp://:7777': 'http://127.0.0.1:7777',
+ 'http://:7777': 'http://127.0.0.1:7777',
+ 'https://kokia.jp:2375': 'https://kokia.jp:2375',
+ 'unix:///var/run/docker.sock': 'http+unix:///var/run/docker.sock',
+ 'unix://': 'http+unix:///var/run/docker.sock',
+ '12.234.45.127:2375/docker/engine': (
+ 'http://12.234.45.127:2375/docker/engine'
+ ),
+ 'somehost.net:80/service/swarm': (
+ 'http://somehost.net:80/service/swarm'
+ ),
+ 'npipe:////./pipe/docker_engine': 'npipe:////./pipe/docker_engine',
+ '[fd12::82d1]:2375': 'http://[fd12::82d1]:2375',
+ 'https://[fd12:5672::12aa]:1090': 'https://[fd12:5672::12aa]:1090',
+ '[fd12::82d1]:2375/docker/engine': (
+ 'http://[fd12::82d1]:2375/docker/engine'
+ ),
+ 'ssh://[fd12::82d1]': 'ssh://[fd12::82d1]:22',
+ 'ssh://user@[fd12::82d1]:8765': 'ssh://user@[fd12::82d1]:8765',
+ 'ssh://': 'ssh://127.0.0.1:22',
+ 'ssh://user@localhost:22': 'ssh://user@localhost:22',
+ 'ssh://user@remote': 'ssh://user@remote:22',
+ }
+
+ for host in invalid_hosts:
+ msg = 'Should have failed to parse invalid host: {0}'.format(host)
+ with self.assertRaises(DockerException, msg=msg):
+ parse_host(host, None)
+
+ for host, expected in valid_hosts.items():
+ self.assertEqual(
+ parse_host(host, None),
+ expected,
+ msg='Failed to parse valid host: {0}'.format(host),
+ )
+
+ def test_parse_host_empty_value(self):
+ unix_socket = 'http+unix:///var/run/docker.sock'
+ npipe = 'npipe:////./pipe/docker_engine'
+
+ for val in [None, '']:
+ assert parse_host(val, is_win32=False) == unix_socket
+ assert parse_host(val, is_win32=True) == npipe
+
+ def test_parse_host_tls(self):
+ host_value = 'myhost.docker.net:3348'
+ expected_result = 'https://myhost.docker.net:3348'
+ assert parse_host(host_value, tls=True) == expected_result
+
+ def test_parse_host_tls_tcp_proto(self):
+ host_value = 'tcp://myhost.docker.net:3348'
+ expected_result = 'https://myhost.docker.net:3348'
+ assert parse_host(host_value, tls=True) == expected_result
+
+ def test_parse_host_trailing_slash(self):
+ host_value = 'tcp://myhost.docker.net:2376/'
+ expected_result = 'http://myhost.docker.net:2376'
+ assert parse_host(host_value) == expected_result
+
+
+class ParseRepositoryTagTest(unittest.TestCase):
+ sha = 'e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855'
+
+ def test_index_image_no_tag(self):
+ assert parse_repository_tag("root") == ("root", None)
+
+ def test_index_image_tag(self):
+ assert parse_repository_tag("root:tag") == ("root", "tag")
+
+ def test_index_user_image_no_tag(self):
+ assert parse_repository_tag("user/repo") == ("user/repo", None)
+
+ def test_index_user_image_tag(self):
+ assert parse_repository_tag("user/repo:tag") == ("user/repo", "tag")
+
+ def test_private_reg_image_no_tag(self):
+ assert parse_repository_tag("url:5000/repo") == ("url:5000/repo", None)
+
+ def test_private_reg_image_tag(self):
+ assert parse_repository_tag("url:5000/repo:tag") == (
+ "url:5000/repo", "tag"
+ )
+
+ def test_index_image_sha(self):
+ assert parse_repository_tag("root@sha256:{sha}".format(sha=self.sha)) == (
+ "root", "sha256:{sha}".format(sha=self.sha)
+ )
+
+ def test_private_reg_image_sha(self):
+ assert parse_repository_tag(
+ "url:5000/repo@sha256:{sha}".format(sha=self.sha)
+ ) == ("url:5000/repo", "sha256:{sha}".format(sha=self.sha))
+
+
+class ParseDeviceTest(unittest.TestCase):
+ def test_dict(self):
+ devices = parse_devices([{
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/mnt1',
+ 'CgroupPermissions': 'r'
+ }])
+ assert devices[0] == {
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/mnt1',
+ 'CgroupPermissions': 'r'
+ }
+
+ def test_partial_string_definition(self):
+ devices = parse_devices(['/dev/sda1'])
+ assert devices[0] == {
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/sda1',
+ 'CgroupPermissions': 'rwm'
+ }
+
+ def test_permissionless_string_definition(self):
+ devices = parse_devices(['/dev/sda1:/dev/mnt1'])
+ assert devices[0] == {
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/mnt1',
+ 'CgroupPermissions': 'rwm'
+ }
+
+ def test_full_string_definition(self):
+ devices = parse_devices(['/dev/sda1:/dev/mnt1:r'])
+ assert devices[0] == {
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/mnt1',
+ 'CgroupPermissions': 'r'
+ }
+
+ def test_hybrid_list(self):
+ devices = parse_devices([
+ '/dev/sda1:/dev/mnt1:rw',
+ {
+ 'PathOnHost': '/dev/sda2',
+ 'PathInContainer': '/dev/mnt2',
+ 'CgroupPermissions': 'r'
+ }
+ ])
+
+ assert devices[0] == {
+ 'PathOnHost': '/dev/sda1',
+ 'PathInContainer': '/dev/mnt1',
+ 'CgroupPermissions': 'rw'
+ }
+ assert devices[1] == {
+ 'PathOnHost': '/dev/sda2',
+ 'PathInContainer': '/dev/mnt2',
+ 'CgroupPermissions': 'r'
+ }
+
+
+class ParseBytesTest(unittest.TestCase):
+ def test_parse_bytes_valid(self):
+ assert parse_bytes("512MB") == 536870912
+ assert parse_bytes("512M") == 536870912
+ assert parse_bytes("512m") == 536870912
+
+ def test_parse_bytes_invalid(self):
+ with pytest.raises(DockerException):
+ parse_bytes("512MK")
+ with pytest.raises(DockerException):
+ parse_bytes("512L")
+ with pytest.raises(DockerException):
+ parse_bytes("127.0.0.1K")
+
+ def test_parse_bytes_float(self):
+ assert parse_bytes("1.5k") == 1536
+
+
+class UtilsTest(unittest.TestCase):
+ longMessage = True
+
+ def test_convert_filters(self):
+ tests = [
+ ({'dangling': True}, '{"dangling": ["true"]}'),
+ ({'dangling': "true"}, '{"dangling": ["true"]}'),
+ ({'exited': 0}, '{"exited": ["0"]}'),
+ ({'exited': [0, 1]}, '{"exited": ["0", "1"]}'),
+ ]
+
+ for filters, expected in tests:
+ assert convert_filters(filters) == expected
+
+ def test_decode_json_header(self):
+ obj = {'a': 'b', 'c': 1}
+ data = None
+ if PY3:
+ data = base64.urlsafe_b64encode(bytes(json.dumps(obj), 'utf-8'))
+ else:
+ data = base64.urlsafe_b64encode(json.dumps(obj))
+ decoded_data = decode_json_header(data)
+ assert obj == decoded_data
+
+
+class SplitCommandTest(unittest.TestCase):
+ def test_split_command_with_unicode(self):
+ assert split_command(u'echo μμ') == ['echo', 'μμ']
+
+ @pytest.mark.skipif(PY3, reason="shlex doesn't support bytes in py3")
+ def test_split_command_with_bytes(self):
+ assert split_command('echo μμ') == ['echo', 'μμ']
+
+
+class FormatEnvironmentTest(unittest.TestCase):
+ def test_format_env_binary_unicode_value(self):
+ env_dict = {
+ 'ARTIST_NAME': b'\xec\x86\xa1\xec\xa7\x80\xec\x9d\x80'
+ }
+ assert format_environment(env_dict) == [u'ARTIST_NAME=송지은']
+
+ def test_format_env_no_value(self):
+ env_dict = {
+ 'FOO': None,
+ 'BAR': '',
+ }
+ assert sorted(format_environment(env_dict)) == ['BAR=', 'FOO']
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/ca.pem b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/ca.pem
new file mode 100644
index 00000000..5c7093a3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/ca.pem
@@ -0,0 +1,7 @@
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/cert.pem b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/cert.pem
new file mode 100644
index 00000000..5c7093a3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/cert.pem
@@ -0,0 +1,7 @@
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/key.pem b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/key.pem
new file mode 100644
index 00000000..5c7093a3
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/_api/utils/testdata/certs/key.pem
@@ -0,0 +1,7 @@
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/test__scramble.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/test__scramble.py
new file mode 100644
index 00000000..ff004306
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/test__scramble.py
@@ -0,0 +1,28 @@
+# Copyright 2022 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.docker.plugins.module_utils._scramble import (
+ scramble,
+ unscramble,
+)
+
+
+@pytest.mark.parametrize('plaintext, key, scrambled', [
+ (u'', b'0', '=S='),
+ (u'hello', b'\x00', '=S=aGVsbG8='),
+ (u'hello', b'\x01', '=S=aWRtbW4='),
+])
+def test_scramble_unscramble(plaintext, key, scrambled):
+ scrambled_ = scramble(plaintext, key)
+ print('{0!r} == {1!r}'.format(scrambled_, scrambled))
+ assert scrambled_ == scrambled
+
+ plaintext_ = unscramble(scrambled, key)
+ print('{0!r} == {1!r}'.format(plaintext_, plaintext))
+ assert plaintext_ == plaintext
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_copy.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_copy.py
new file mode 100644
index 00000000..3668573b
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_copy.py
@@ -0,0 +1,77 @@
+# Copyright 2022 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.docker.plugins.module_utils.copy import (
+ _stream_generator_to_fileobj,
+)
+
+
+def _simple_generator(sequence):
+ for elt in sequence:
+ yield elt
+
+
+@pytest.mark.parametrize('chunks, read_sizes', [
+ (
+ [
+ (1, b'1'),
+ (1, b'2'),
+ (1, b'3'),
+ (1, b'4'),
+ ],
+ [
+ 1,
+ 2,
+ 3,
+ ]
+ ),
+ (
+ [
+ (1, b'123'),
+ (1, b'456'),
+ (1, b'789'),
+ ],
+ [
+ 1,
+ 4,
+ 2,
+ 2,
+ 2,
+ ]
+ ),
+ (
+ [
+ (10 * 1024 * 1024, b'0'),
+ (10 * 1024 * 1024, b'1'),
+ ],
+ [
+ 1024 * 1024 - 5,
+ 5 * 1024 * 1024 - 3,
+ 10 * 1024 * 1024 - 2,
+ 2 * 1024 * 1024 - 1,
+ 2 * 1024 * 1024 + 5 + 3 + 2 + 1,
+ ]
+ ),
+])
+def test__stream_generator_to_fileobj(chunks, read_sizes):
+ chunks = [count * data for count, data in chunks]
+ stream = _simple_generator(chunks)
+ expected = b''.join(chunks)
+
+ buffer = b''
+ totally_read = 0
+ f = _stream_generator_to_fileobj(stream)
+ for read_size in read_sizes:
+ chunk = f.read(read_size)
+ assert len(chunk) == min(read_size, len(expected) - len(buffer))
+ buffer += chunk
+ totally_read += read_size
+
+ assert buffer == expected[:len(buffer)]
+ assert min(totally_read, len(expected)) == len(buffer)
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_image_archive.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_image_archive.py
new file mode 100644
index 00000000..10573b96
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_image_archive.py
@@ -0,0 +1,94 @@
+# Copyright 2022 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+import tarfile
+
+from ansible_collections.community.docker.plugins.module_utils.image_archive import (
+ api_image_id,
+ archived_image_manifest,
+ ImageArchiveInvalidException
+)
+
+from ..test_support.docker_image_archive_stubbing import (
+ write_imitation_archive,
+ write_imitation_archive_with_manifest,
+ write_irrelevant_tar,
+)
+
+
+@pytest.fixture
+def tar_file_name(tmpdir):
+ '''
+ Return the name of a non-existing tar file in an existing temporary directory.
+ '''
+
+ # Cast to str required by Python 2.x
+ return str(tmpdir.join('foo.tar'))
+
+
+@pytest.mark.parametrize('expected, value', [
+ ('sha256:foo', 'foo'),
+ ('sha256:bar', 'bar')
+])
+def test_api_image_id_from_archive_id(expected, value):
+ assert api_image_id(value) == expected
+
+
+def test_archived_image_manifest_extracts(tar_file_name):
+ expected_id = "abcde12345"
+ expected_tags = ["foo:latest", "bar:v1"]
+
+ write_imitation_archive(tar_file_name, expected_id, expected_tags)
+
+ actual = archived_image_manifest(tar_file_name)
+
+ assert actual.image_id == expected_id
+ assert actual.repo_tags == expected_tags
+
+
+def test_archived_image_manifest_extracts_nothing_when_file_not_present(tar_file_name):
+ image_id = archived_image_manifest(tar_file_name)
+
+ assert image_id is None
+
+
+def test_archived_image_manifest_raises_when_file_not_a_tar():
+ try:
+ archived_image_manifest(__file__)
+ raise AssertionError()
+ except ImageArchiveInvalidException as e:
+ assert isinstance(e.cause, tarfile.ReadError)
+ assert str(__file__) in str(e)
+
+
+def test_archived_image_manifest_raises_when_tar_missing_manifest(tar_file_name):
+ write_irrelevant_tar(tar_file_name)
+
+ try:
+ archived_image_manifest(tar_file_name)
+ raise AssertionError()
+ except ImageArchiveInvalidException as e:
+ assert isinstance(e.cause, KeyError)
+ assert 'manifest.json' in str(e.cause)
+
+
+def test_archived_image_manifest_raises_when_manifest_missing_id(tar_file_name):
+ manifest = [
+ {
+ 'foo': 'bar'
+ }
+ ]
+
+ write_imitation_archive_with_manifest(tar_file_name, manifest)
+
+ try:
+ archived_image_manifest(tar_file_name)
+ raise AssertionError()
+ except ImageArchiveInvalidException as e:
+ assert isinstance(e.cause, KeyError)
+ assert 'Config' in str(e.cause)
diff --git a/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_util.py b/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_util.py
new file mode 100644
index 00000000..c7d36212
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/module_utils/test_util.py
@@ -0,0 +1,522 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ compare_dict_allow_more_present,
+ compare_generic,
+ convert_duration_to_nanosecond,
+ parse_healthcheck
+)
+
+DICT_ALLOW_MORE_PRESENT = (
+ {
+ 'av': {},
+ 'bv': {'a': 1},
+ 'result': True
+ },
+ {
+ 'av': {'a': 1},
+ 'bv': {'a': 1, 'b': 2},
+ 'result': True
+ },
+ {
+ 'av': {'a': 1},
+ 'bv': {'b': 2},
+ 'result': False
+ },
+ {
+ 'av': {'a': 1},
+ 'bv': {'a': None, 'b': 1},
+ 'result': False
+ },
+ {
+ 'av': {'a': None},
+ 'bv': {'b': 1},
+ 'result': False
+ },
+)
+
+COMPARE_GENERIC = [
+ ########################################################################################
+ # value
+ {
+ 'a': 1,
+ 'b': 2,
+ 'method': 'strict',
+ 'type': 'value',
+ 'result': False
+ },
+ {
+ 'a': 'hello',
+ 'b': 'hello',
+ 'method': 'strict',
+ 'type': 'value',
+ 'result': True
+ },
+ {
+ 'a': None,
+ 'b': 'hello',
+ 'method': 'strict',
+ 'type': 'value',
+ 'result': False
+ },
+ {
+ 'a': None,
+ 'b': None,
+ 'method': 'strict',
+ 'type': 'value',
+ 'result': True
+ },
+ {
+ 'a': 1,
+ 'b': 2,
+ 'method': 'ignore',
+ 'type': 'value',
+ 'result': True
+ },
+ {
+ 'a': None,
+ 'b': 2,
+ 'method': 'ignore',
+ 'type': 'value',
+ 'result': True
+ },
+ ########################################################################################
+ # list
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'y',
+ ],
+ 'method': 'strict',
+ 'type': 'list',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'x',
+ 'x',
+ ],
+ 'method': 'strict',
+ 'type': 'list',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'strict',
+ 'type': 'list',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'y',
+ 'x',
+ ],
+ 'method': 'strict',
+ 'type': 'list',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'list',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'list',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'list',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ 'z',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ 'x',
+ 'z',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'list',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'y',
+ 'x',
+ ],
+ 'method': 'ignore',
+ 'type': 'list',
+ 'result': True
+ },
+ ########################################################################################
+ # set
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'y',
+ ],
+ 'method': 'strict',
+ 'type': 'set',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'x',
+ 'x',
+ ],
+ 'method': 'strict',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'strict',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'y',
+ 'x',
+ ],
+ 'method': 'strict',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set',
+ 'result': False
+ },
+ {
+ 'a': [
+ 'x',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'x',
+ 'y',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'z',
+ ],
+ 'b': [
+ 'x',
+ 'y',
+ 'x',
+ 'z',
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set',
+ 'result': True
+ },
+ {
+ 'a': [
+ 'x',
+ 'a',
+ ],
+ 'b': [
+ 'y',
+ 'z',
+ ],
+ 'method': 'ignore',
+ 'type': 'set',
+ 'result': True
+ },
+ ########################################################################################
+ # set(dict)
+ {
+ 'a': [
+ {'x': 1},
+ ],
+ 'b': [
+ {'y': 1},
+ ],
+ 'method': 'strict',
+ 'type': 'set(dict)',
+ 'result': False
+ },
+ {
+ 'a': [
+ {'x': 1},
+ ],
+ 'b': [
+ {'x': 1},
+ ],
+ 'method': 'strict',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ {
+ 'a': [
+ {'x': 1},
+ ],
+ 'b': [
+ {'x': 1, 'y': 2},
+ ],
+ 'method': 'strict',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ {
+ 'a': [
+ {'x': 1},
+ {'x': 2, 'y': 3},
+ ],
+ 'b': [
+ {'x': 1},
+ {'x': 2, 'y': 3},
+ ],
+ 'method': 'strict',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ {
+ 'a': [
+ {'x': 1},
+ ],
+ 'b': [
+ {'x': 1, 'z': 2},
+ {'x': 2, 'y': 3},
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ {
+ 'a': [
+ {'x': 1, 'y': 2},
+ ],
+ 'b': [
+ {'x': 1},
+ {'x': 2, 'y': 3},
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set(dict)',
+ 'result': False
+ },
+ {
+ 'a': [
+ {'x': 1, 'y': 3},
+ ],
+ 'b': [
+ {'x': 1},
+ {'x': 1, 'y': 3, 'z': 4},
+ ],
+ 'method': 'allow_more_present',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ {
+ 'a': [
+ {'x': 1},
+ {'x': 2, 'y': 3},
+ ],
+ 'b': [
+ {'x': 1},
+ ],
+ 'method': 'ignore',
+ 'type': 'set(dict)',
+ 'result': True
+ },
+ ########################################################################################
+ # dict
+ {
+ 'a': {'x': 1},
+ 'b': {'y': 1},
+ 'method': 'strict',
+ 'type': 'dict',
+ 'result': False
+ },
+ {
+ 'a': {'x': 1},
+ 'b': {'x': 1, 'y': 2},
+ 'method': 'strict',
+ 'type': 'dict',
+ 'result': False
+ },
+ {
+ 'a': {'x': 1},
+ 'b': {'x': 1},
+ 'method': 'strict',
+ 'type': 'dict',
+ 'result': True
+ },
+ {
+ 'a': {'x': 1, 'z': 2},
+ 'b': {'x': 1, 'y': 2},
+ 'method': 'strict',
+ 'type': 'dict',
+ 'result': False
+ },
+ {
+ 'a': {'x': 1, 'z': 2},
+ 'b': {'x': 1, 'y': 2},
+ 'method': 'ignore',
+ 'type': 'dict',
+ 'result': True
+ },
+] + [{
+ 'a': entry['av'],
+ 'b': entry['bv'],
+ 'method': 'allow_more_present',
+ 'type': 'dict',
+ 'result': entry['result']
+} for entry in DICT_ALLOW_MORE_PRESENT]
+
+
+@pytest.mark.parametrize("entry", DICT_ALLOW_MORE_PRESENT)
+def test_dict_allow_more_present(entry):
+ assert compare_dict_allow_more_present(entry['av'], entry['bv']) == entry['result']
+
+
+@pytest.mark.parametrize("entry", COMPARE_GENERIC)
+def test_compare_generic(entry):
+ assert compare_generic(entry['a'], entry['b'], entry['method'], entry['type']) == entry['result']
+
+
+def test_convert_duration_to_nanosecond():
+ nanoseconds = convert_duration_to_nanosecond('5s')
+ assert nanoseconds == 5000000000
+ nanoseconds = convert_duration_to_nanosecond('1m5s')
+ assert nanoseconds == 65000000000
+ with pytest.raises(ValueError):
+ convert_duration_to_nanosecond([1, 2, 3])
+ with pytest.raises(ValueError):
+ convert_duration_to_nanosecond('10x')
+
+
+def test_parse_healthcheck():
+ result, disabled = parse_healthcheck({
+ 'test': 'sleep 1',
+ 'interval': '1s',
+ })
+ assert disabled is False
+ assert result == {
+ 'test': ['CMD-SHELL', 'sleep 1'],
+ 'interval': 1000000000
+ }
+
+ result, disabled = parse_healthcheck({
+ 'test': ['NONE'],
+ })
+ assert result is None
+ assert disabled
+
+ result, disabled = parse_healthcheck({
+ 'test': 'sleep 1',
+ 'interval': '1s423ms'
+ })
+ assert result == {
+ 'test': ['CMD-SHELL', 'sleep 1'],
+ 'interval': 1423000000
+ }
+ assert disabled is False
+
+ result, disabled = parse_healthcheck({
+ 'test': 'sleep 1',
+ 'interval': '1h1m2s3ms4us'
+ })
+ assert result == {
+ 'test': ['CMD-SHELL', 'sleep 1'],
+ 'interval': 3662003004000
+ }
+ assert disabled is False
diff --git a/ansible_collections/community/docker/tests/unit/plugins/modules/conftest.py b/ansible_collections/community/docker/tests/unit/plugins/modules/conftest.py
new file mode 100644
index 00000000..0ed3dd44
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/modules/conftest.py
@@ -0,0 +1,32 @@
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+
+import pytest
+
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common.text.converters import to_bytes
+from ansible.module_utils.common._collections_compat import MutableMapping
+
+
+@pytest.fixture
+def patch_ansible_module(request, mocker):
+ if isinstance(request.param, string_types):
+ args = request.param
+ elif isinstance(request.param, MutableMapping):
+ if 'ANSIBLE_MODULE_ARGS' not in request.param:
+ request.param = {'ANSIBLE_MODULE_ARGS': request.param}
+ if '_ansible_remote_tmp' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_remote_tmp'] = '/tmp'
+ if '_ansible_keep_remote_files' not in request.param['ANSIBLE_MODULE_ARGS']:
+ request.param['ANSIBLE_MODULE_ARGS']['_ansible_keep_remote_files'] = False
+ args = json.dumps(request.param)
+ else:
+ raise Exception('Malformed data to the patch_ansible_module pytest fixture')
+
+ mocker.patch('ansible.module_utils.basic._ANSIBLE_ARGS', to_bytes(args))
diff --git a/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_image.py b/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_image.py
new file mode 100644
index 00000000..3401837f
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_image.py
@@ -0,0 +1,114 @@
+# Copyright 2022 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.docker.plugins.modules.docker_image import ImageManager
+
+from ansible_collections.community.docker.plugins.module_utils.image_archive import api_image_id
+
+from ..test_support.docker_image_archive_stubbing import (
+ write_imitation_archive,
+ write_irrelevant_tar,
+)
+
+
+def assert_no_logging(msg):
+ raise AssertionError('Should not have logged anything but logged %s' % msg)
+
+
+def capture_logging(messages):
+ def capture(msg):
+ messages.append(msg)
+
+ return capture
+
+
+@pytest.fixture
+def tar_file_name(tmpdir):
+ """
+ Return the name of a non-existing tar file in an existing temporary directory.
+ """
+
+ # Cast to str required by Python 2.x
+ return str(tmpdir.join('foo.tar'))
+
+
+def test_archived_image_action_when_missing(tar_file_name):
+ fake_name = 'a:latest'
+ fake_id = 'a1'
+
+ expected = 'Archived image %s to %s, since none present' % (fake_name, tar_file_name)
+
+ actual = ImageManager.archived_image_action(assert_no_logging, tar_file_name, fake_name, api_image_id(fake_id))
+
+ assert actual == expected
+
+
+def test_archived_image_action_when_current(tar_file_name):
+ fake_name = 'b:latest'
+ fake_id = 'b2'
+
+ write_imitation_archive(tar_file_name, fake_id, [fake_name])
+
+ actual = ImageManager.archived_image_action(assert_no_logging, tar_file_name, fake_name, api_image_id(fake_id))
+
+ assert actual is None
+
+
+def test_archived_image_action_when_invalid(tar_file_name):
+ fake_name = 'c:1.2.3'
+ fake_id = 'c3'
+
+ write_irrelevant_tar(tar_file_name)
+
+ expected = 'Archived image %s to %s, overwriting an unreadable archive file' % (fake_name, tar_file_name)
+
+ actual_log = []
+ actual = ImageManager.archived_image_action(
+ capture_logging(actual_log),
+ tar_file_name,
+ fake_name,
+ api_image_id(fake_id)
+ )
+
+ assert actual == expected
+
+ assert len(actual_log) == 1
+ assert actual_log[0].startswith('Unable to extract manifest summary from archive')
+
+
+def test_archived_image_action_when_obsolete_by_id(tar_file_name):
+ fake_name = 'd:0.0.1'
+ old_id = 'e5'
+ new_id = 'd4'
+
+ write_imitation_archive(tar_file_name, old_id, [fake_name])
+
+ expected = 'Archived image %s to %s, overwriting archive with image %s named %s' % (
+ fake_name, tar_file_name, old_id, fake_name
+ )
+ actual = ImageManager.archived_image_action(assert_no_logging, tar_file_name, fake_name, api_image_id(new_id))
+
+ assert actual == expected
+
+
+def test_archived_image_action_when_obsolete_by_name(tar_file_name):
+ old_name = 'hi'
+ new_name = 'd:0.0.1'
+ fake_id = 'd4'
+
+ write_imitation_archive(tar_file_name, fake_id, [old_name])
+
+ expected = 'Archived image %s to %s, overwriting archive with image %s named %s' % (
+ new_name, tar_file_name, fake_id, old_name
+ )
+ actual = ImageManager.archived_image_action(assert_no_logging, tar_file_name, new_name, api_image_id(fake_id))
+
+    print('actual : %s' % actual)
+    print('expected : %s' % expected)
+ assert actual == expected
diff --git a/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_network.py b/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_network.py
new file mode 100644
index 00000000..a937c6db
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_network.py
@@ -0,0 +1,35 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Unit tests for docker_network."""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+from ansible_collections.community.docker.plugins.modules.docker_network import validate_cidr
+
+
+@pytest.mark.parametrize("cidr,expected", [
+ ('192.168.0.1/16', 'ipv4'),
+ ('192.168.0.1/24', 'ipv4'),
+ ('192.168.0.1/32', 'ipv4'),
+ ('fdd1:ac8c:0557:7ce2::/64', 'ipv6'),
+ ('fdd1:ac8c:0557:7ce2::/128', 'ipv6'),
+])
+def test_validate_cidr_positives(cidr, expected):
+ assert validate_cidr(cidr) == expected
+
+
+@pytest.mark.parametrize("cidr", [
+ '192.168.0.1',
+ '192.168.0.1/34',
+ '192.168.0.1/asd',
+ 'fdd1:ac8c:0557:7ce2::',
+])
+def test_validate_cidr_negatives(cidr):
+ with pytest.raises(ValueError) as e:
+ validate_cidr(cidr)
+ assert '"{0}" is not a valid CIDR'.format(cidr) == str(e.value)
diff --git a/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_swarm_service.py b/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_swarm_service.py
new file mode 100644
index 00000000..1cef623b
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/modules/test_docker_swarm_service.py
@@ -0,0 +1,514 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import pytest
+
+
+class APIErrorMock(Exception):
+ def __init__(self, message, response=None, explanation=None):
+ self.message = message
+ self.response = response
+ self.explanation = explanation
+
+
+@pytest.fixture(autouse=True)
+def docker_module_mock(mocker):
+ docker_module_mock = mocker.MagicMock()
+ docker_utils_module_mock = mocker.MagicMock()
+ docker_errors_module_mock = mocker.MagicMock()
+ docker_errors_module_mock.APIError = APIErrorMock
+ mock_modules = {
+ 'docker': docker_module_mock,
+ 'docker.utils': docker_utils_module_mock,
+ 'docker.errors': docker_errors_module_mock,
+ }
+ return mocker.patch.dict('sys.modules', **mock_modules)
+
+
+@pytest.fixture(autouse=True)
+def docker_swarm_service():
+ from ansible_collections.community.docker.plugins.modules import docker_swarm_service
+
+ return docker_swarm_service
+
+
+def test_retry_on_out_of_sequence_error(mocker, docker_swarm_service):
+ run_mock = mocker.MagicMock(
+ side_effect=APIErrorMock(
+ message='',
+ response=None,
+ explanation='rpc error: code = Unknown desc = update out of sequence',
+ )
+ )
+ manager = docker_swarm_service.DockerServiceManager(client=None)
+ manager.run = run_mock
+ with pytest.raises(APIErrorMock):
+ manager.run_safe()
+ assert run_mock.call_count == 3
+
+
+def test_no_retry_on_general_api_error(mocker, docker_swarm_service):
+ run_mock = mocker.MagicMock(
+ side_effect=APIErrorMock(message='', response=None, explanation='some error')
+ )
+ manager = docker_swarm_service.DockerServiceManager(client=None)
+ manager.run = run_mock
+ with pytest.raises(APIErrorMock):
+ manager.run_safe()
+ assert run_mock.call_count == 1
+
+
+def test_get_docker_environment(mocker, docker_swarm_service):
+ env_file_result = {'TEST1': 'A', 'TEST2': 'B', 'TEST3': 'C'}
+ env_dict = {'TEST3': 'CC', 'TEST4': 'D'}
+ env_string = "TEST3=CC,TEST4=D"
+
+ env_list = ['TEST3=CC', 'TEST4=D']
+ expected_result = sorted(['TEST1=A', 'TEST2=B', 'TEST3=CC', 'TEST4=D'])
+ mocker.patch.object(
+ docker_swarm_service, 'parse_env_file', return_value=env_file_result
+ )
+ mocker.patch.object(
+ docker_swarm_service,
+ 'format_environment',
+ side_effect=lambda d: ['{0}={1}'.format(key, value) for key, value in d.items()],
+ )
+ # Test with env dict and file
+ result = docker_swarm_service.get_docker_environment(
+ env_dict, env_files=['dummypath']
+ )
+ assert result == expected_result
+ # Test with env list and file
+ result = docker_swarm_service.get_docker_environment(
+ env_list,
+ env_files=['dummypath']
+ )
+ assert result == expected_result
+ # Test with env string and file
+ result = docker_swarm_service.get_docker_environment(
+ env_string, env_files=['dummypath']
+ )
+ assert result == expected_result
+
+ assert result == expected_result
+ # Test with empty env
+ result = docker_swarm_service.get_docker_environment(
+ [], env_files=None
+ )
+ assert result == []
+ # Test with empty env_files
+ result = docker_swarm_service.get_docker_environment(
+ None, env_files=[]
+ )
+ assert result == []
+
+
+def test_get_nanoseconds_from_raw_option(docker_swarm_service):
+ value = docker_swarm_service.get_nanoseconds_from_raw_option('test', None)
+ assert value is None
+
+ value = docker_swarm_service.get_nanoseconds_from_raw_option('test', '1m30s535ms')
+ assert value == 90535000000
+
+ value = docker_swarm_service.get_nanoseconds_from_raw_option('test', 10000000000)
+ assert value == 10000000000
+
+ with pytest.raises(ValueError):
+ docker_swarm_service.get_nanoseconds_from_raw_option('test', [])
+
+
+def test_has_dict_changed(docker_swarm_service):
+ assert not docker_swarm_service.has_dict_changed(
+ {"a": 1},
+ {"a": 1},
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ {"a": 1},
+ {"a": 1, "b": 2}
+ )
+ assert docker_swarm_service.has_dict_changed(
+ {"a": 1},
+ {"a": 2, "b": 2}
+ )
+ assert docker_swarm_service.has_dict_changed(
+ {"a": 1, "b": 1},
+ {"a": 1}
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ None,
+ {"a": 2, "b": 2}
+ )
+ assert docker_swarm_service.has_dict_changed(
+ {},
+ {"a": 2, "b": 2}
+ )
+ assert docker_swarm_service.has_dict_changed(
+ {"a": 1},
+ {}
+ )
+ assert docker_swarm_service.has_dict_changed(
+ {"a": 1},
+ None
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ {},
+ {}
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ None,
+ None
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ {},
+ None
+ )
+ assert not docker_swarm_service.has_dict_changed(
+ None,
+ {}
+ )
+
+
+def test_has_list_changed(docker_swarm_service):
+
+ # List comparisons without dictionaries
+ # I could improve the indenting, but pycodestyle wants this instead
+ assert not docker_swarm_service.has_list_changed(None, None)
+ assert not docker_swarm_service.has_list_changed(None, [])
+ assert not docker_swarm_service.has_list_changed(None, [1, 2])
+
+ assert not docker_swarm_service.has_list_changed([], None)
+ assert not docker_swarm_service.has_list_changed([], [])
+ assert docker_swarm_service.has_list_changed([], [1, 2])
+
+ assert docker_swarm_service.has_list_changed([1, 2], None)
+ assert docker_swarm_service.has_list_changed([1, 2], [])
+
+ assert docker_swarm_service.has_list_changed([1, 2, 3], [1, 2])
+ assert docker_swarm_service.has_list_changed([1, 2], [1, 2, 3])
+
+ # Check list sorting
+ assert not docker_swarm_service.has_list_changed([1, 2], [2, 1])
+ assert docker_swarm_service.has_list_changed(
+ [1, 2],
+ [2, 1],
+ sort_lists=False
+ )
+
+ # Check type matching
+ assert docker_swarm_service.has_list_changed([None, 1], [2, 1])
+ assert docker_swarm_service.has_list_changed([2, 1], [None, 1])
+ assert docker_swarm_service.has_list_changed(
+ "command --with args",
+ ['command', '--with', 'args']
+ )
+ assert docker_swarm_service.has_list_changed(
+ ['sleep', '3400'],
+ [u'sleep', u'3600'],
+ sort_lists=False
+ )
+
+ # List comparisons with dictionaries
+ assert not docker_swarm_service.has_list_changed(
+ [{'a': 1}],
+ [{'a': 1}],
+ sort_key='a'
+ )
+
+ assert not docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}],
+ [{'a': 1}, {'a': 2}],
+ sort_key='a'
+ )
+
+ with pytest.raises(Exception):
+ docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}],
+ [{'a': 1}, {'a': 2}]
+ )
+
+ # List sort checking with sort key
+ assert not docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}],
+ [{'a': 2}, {'a': 1}],
+ sort_key='a'
+ )
+ assert docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}],
+ [{'a': 2}, {'a': 1}],
+ sort_lists=False
+ )
+
+ assert docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}, {'a': 3}],
+ [{'a': 2}, {'a': 1}],
+ sort_key='a'
+ )
+ assert docker_swarm_service.has_list_changed(
+ [{'a': 1}, {'a': 2}],
+ [{'a': 1}, {'a': 2}, {'a': 3}],
+ sort_lists=False
+ )
+
+ # Additional dictionary elements
+ assert not docker_swarm_service.has_list_changed(
+ [
+ {"src": 1, "dst": 2},
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ ],
+ [
+ {"src": 1, "dst": 2, "protocol": "tcp"},
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ ],
+ sort_key='dst'
+ )
+ assert not docker_swarm_service.has_list_changed(
+ [
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ {"src": 1, "dst": 3, "protocol": "tcp"},
+ ],
+ [
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ {"src": 1, "dst": 3, "protocol": "tcp"},
+ ],
+ sort_key='dst'
+ )
+ assert docker_swarm_service.has_list_changed(
+ [
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ {"src": 1, "dst": 2},
+ {"src": 3, "dst": 4},
+ ],
+ [
+ {"src": 1, "dst": 3, "protocol": "udp"},
+ {"src": 1, "dst": 2, "protocol": "tcp"},
+ {"src": 3, "dst": 4, "protocol": "tcp"},
+ ],
+ sort_key='dst'
+ )
+ assert docker_swarm_service.has_list_changed(
+ [
+ {"src": 1, "dst": 3, "protocol": "tcp"},
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ ],
+ [
+ {"src": 1, "dst": 2, "protocol": "tcp"},
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ ],
+ sort_key='dst'
+ )
+ assert docker_swarm_service.has_list_changed(
+ [
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ {"src": 1, "dst": 2, "protocol": "tcp", "extra": {"test": "foo"}},
+ ],
+ [
+ {"src": 1, "dst": 2, "protocol": "udp"},
+ {"src": 1, "dst": 2, "protocol": "tcp"},
+ ],
+ sort_key='dst'
+ )
+ assert not docker_swarm_service.has_list_changed(
+ [{'id': '123', 'aliases': []}],
+ [{'id': '123'}],
+ sort_key='id'
+ )
+
+
+def test_have_networks_changed(docker_swarm_service):
+ assert not docker_swarm_service.have_networks_changed(
+ None,
+ None
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [],
+ None
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [{'id': 1}],
+ [{'id': 1}]
+ )
+
+ assert docker_swarm_service.have_networks_changed(
+ [{'id': 1}],
+ [{'id': 1}, {'id': 2}]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [{'id': 1}, {'id': 2}],
+ [{'id': 1}, {'id': 2}]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [{'id': 1}, {'id': 2}],
+ [{'id': 2}, {'id': 1}]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': []}
+ ],
+ [
+ {'id': 1},
+ {'id': 2}
+ ]
+ )
+
+ assert docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1']}
+ ],
+ [
+ {'id': 1},
+ {'id': 2}
+ ]
+ )
+
+ assert docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}
+ ],
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1']}
+ ]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}
+ ],
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}
+ ]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}
+ ],
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias2', 'alias1']}
+ ]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1, 'options': {}},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}],
+ [
+ {'id': 1},
+ {'id': 2, 'aliases': ['alias2', 'alias1']}
+ ]
+ )
+
+ assert not docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1, 'options': {'option1': 'value1'}},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}],
+ [
+ {'id': 1, 'options': {'option1': 'value1'}},
+ {'id': 2, 'aliases': ['alias2', 'alias1']}
+ ]
+ )
+
+ assert docker_swarm_service.have_networks_changed(
+ [
+ {'id': 1, 'options': {'option1': 'value1'}},
+ {'id': 2, 'aliases': ['alias1', 'alias2']}],
+ [
+ {'id': 1, 'options': {'option1': 'value2'}},
+ {'id': 2, 'aliases': ['alias2', 'alias1']}
+ ]
+ )
+
+
+def test_get_docker_networks(docker_swarm_service):
+ network_names = [
+ 'network_1',
+ 'network_2',
+ 'network_3',
+ 'network_4',
+ ]
+ networks = [
+ network_names[0],
+ {'name': network_names[1]},
+ {'name': network_names[2], 'aliases': ['networkalias1']},
+ {'name': network_names[3], 'aliases': ['networkalias2'], 'options': {'foo': 'bar'}},
+ ]
+ network_ids = {
+ network_names[0]: '1',
+ network_names[1]: '2',
+ network_names[2]: '3',
+ network_names[3]: '4',
+ }
+ parsed_networks = docker_swarm_service.get_docker_networks(
+ networks,
+ network_ids
+ )
+ assert len(parsed_networks) == 4
+ for i, network in enumerate(parsed_networks):
+ assert 'name' not in network
+ assert 'id' in network
+ expected_name = network_names[i]
+ assert network['id'] == network_ids[expected_name]
+ if i == 2:
+ assert network['aliases'] == ['networkalias1']
+ if i == 3:
+ assert network['aliases'] == ['networkalias2']
+ if i == 3:
+ assert 'foo' in network['options']
+ # Test missing name
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks([{'invalid': 'err'}], {'err': 1})
+ # test for invalid aliases type
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks(
+ [{'name': 'test', 'aliases': 1}],
+ {'test': 1}
+ )
+ # Test invalid aliases elements
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks(
+ [{'name': 'test', 'aliases': [1]}],
+ {'test': 1}
+ )
+ # Test for invalid options type
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks(
+ [{'name': 'test', 'options': 1}],
+ {'test': 1}
+ )
+ # Test for invalid networks type
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks(
+ 1,
+ {'test': 1}
+ )
+ # Test for non existing networks
+ with pytest.raises(ValueError):
+ docker_swarm_service.get_docker_networks(
+ [{'name': 'idontexist'}],
+ {'test': 1}
+ )
+ # Test empty values
+ assert docker_swarm_service.get_docker_networks([], {}) == []
+ assert docker_swarm_service.get_docker_networks(None, {}) is None
+ # Test invalid options
+ with pytest.raises(TypeError):
+ docker_swarm_service.get_docker_networks(
+ [{'name': 'test', 'nonexisting_option': 'foo'}],
+ {'test': '1'}
+ )
diff --git a/ansible_collections/community/docker/tests/unit/plugins/test_support/docker_image_archive_stubbing.py b/ansible_collections/community/docker/tests/unit/plugins/test_support/docker_image_archive_stubbing.py
new file mode 100644
index 00000000..842ec4cf
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/plugins/test_support/docker_image_archive_stubbing.py
@@ -0,0 +1,76 @@
+# Copyright 2022 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import tarfile
+from tempfile import TemporaryFile
+
+
+def write_imitation_archive(file_name, image_id, repo_tags):
+ '''
+ Write a tar file meeting these requirements:
+
+ * Has a file manifest.json
+ * manifest.json contains a one-element array
+ * The element has a Config property with "[image_id].json" as the value name
+
+ :param file_name: Name of file to create
+ :type file_name: str
+ :param image_id: Fake sha256 hash (without the sha256: prefix)
+ :type image_id: str
+ :param repo_tags: list of fake image:tag's
+ :type repo_tags: list
+ '''
+
+ manifest = [
+ {
+ 'Config': '%s.json' % image_id,
+ 'RepoTags': repo_tags
+ }
+ ]
+
+ write_imitation_archive_with_manifest(file_name, manifest)
+
+
+def write_imitation_archive_with_manifest(file_name, manifest):
+ tf = tarfile.open(file_name, 'w')
+ try:
+ with TemporaryFile() as f:
+ f.write(json.dumps(manifest).encode('utf-8'))
+
+ ti = tarfile.TarInfo('manifest.json')
+ ti.size = f.tell()
+
+ f.seek(0)
+ tf.addfile(ti, f)
+
+ finally:
+ # In Python 2.6, this does not have __exit__
+ tf.close()
+
+
+def write_irrelevant_tar(file_name):
+ '''
+ Create a tar file that does not match the spec for "docker image save" / "docker image load" commands.
+
+ :param file_name: Name of tar file to create
+ :type file_name: str
+ '''
+
+ tf = tarfile.open(file_name, 'w')
+ try:
+ with TemporaryFile() as f:
+ f.write('Hello, world.'.encode('utf-8'))
+
+ ti = tarfile.TarInfo('hi.txt')
+ ti.size = f.tell()
+
+ f.seek(0)
+ tf.addfile(ti, f)
+
+ finally:
+ tf.close()
diff --git a/ansible_collections/community/docker/tests/unit/requirements.txt b/ansible_collections/community/docker/tests/unit/requirements.txt
new file mode 100644
index 00000000..386c97e2
--- /dev/null
+++ b/ansible_collections/community/docker/tests/unit/requirements.txt
@@ -0,0 +1,9 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+unittest2 ; python_version < '2.7'
+importlib ; python_version < '2.7'
+
+requests
+backports.ssl-match-hostname ; python_version < '3.5'
diff --git a/ansible_collections/community/docker/tests/utils/constraints.txt b/ansible_collections/community/docker/tests/utils/constraints.txt
new file mode 100644
index 00000000..d8fcb61d
--- /dev/null
+++ b/ansible_collections/community/docker/tests/utils/constraints.txt
@@ -0,0 +1,25 @@
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+bcrypt < 3.2.0 ; python_version <= '3.6'
+certifi < 2022.5.18 ; python_version < '3.5' # certifi 2022.5.18 requires Python 3.5 or later
+cffi >= 1.14.2, != 1.14.3 # Yanked version which older versions of pip will still install
+coverage >= 4.2, < 5.0.0, != 4.3.2 ; python_version <= '3.7' # features in 4.2+ required, avoid known bug in 4.3.2 on python 2.6, coverage 5.0+ incompatible
+coverage >= 4.5.4, < 5.0.0 ; python_version > '3.7' # coverage had a bug in < 4.5.4 that would cause unit tests to hang in Python 3.8, coverage 5.0+ incompatible
+cryptography >= 1.3.0, < 2.2 ; python_version < '2.7' # cryptography 2.2 drops support for python 2.6
+cryptography >= 1.3.0, < 3.4 ; python_version < '3.6' # cryptography 3.4 drops support for python 2.7
+urllib3 < 1.24 ; python_version < '2.7' # urllib3 1.24 and later require python 2.7 or later
+wheel < 0.30.0 ; python_version < '2.7' # wheel 0.30.0 and later require python 2.7 or later
+paramiko < 2.4.0 ; python_version < '2.7' # paramiko 2.4.0 drops support for python 2.6
+paramiko < 3.0.0 ; python_version < '3.7' # paramiko 3.0.0 forces installation of a too new cryptography
+requests < 2.20.0 ; python_version < '2.7' # requests 2.20.0 drops support for python 2.6
+requests < 2.28 ; python_version < '3.7' # requests 2.28.0 drops support for python < 3.7
+virtualenv < 16.0.0 ; python_version < '2.7' # virtualenv 16.0.0 and later require python 2.7 or later
+pyopenssl < 18.0.0 ; python_version < '2.7' # pyOpenSSL 18.0.0 and later require python 2.7 or later
+setuptools < 45 ; python_version <= '2.7' # setuptools 45 and later require python 3.5 or later
+websocket-client < 1.0.0 ; python_version <= '3.6'
+
+# Restrict docker versions depending on Python version
+docker < 5.0.0 ; python_version <= '3.6'
+docker-compose < 1.25.0 ; python_version <= '3.6'
diff --git a/ansible_collections/community/docker/tests/utils/shippable/alpine.sh b/ansible_collections/community/docker/tests/utils/shippable/alpine.sh
new file mode 100755
index 00000000..157dd74e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/utils/shippable/alpine.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+pyver=default
+
+# check for explicit python version like 8.3@3.8
+declare -a splitversion
+IFS='@' read -ra splitversion <<< "$version"
+
+if [ "${#splitversion[@]}" -gt 1 ]; then
+ version="${splitversion[0]}"
+ pyver="${splitversion[1]}"
+fi
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/${args[2]}/"
+else
+ target="azp/"
+fi
+
+force_python=""
+if [[ "${version}" =~ -pypi-latest$ ]]; then
+ version="${version/-pypi-latest}"
+    echo 'force_docker_sdk_for_python_pypi: true' >> tests/integration/integration_config.yml
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8\. ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --python "${pyver}" --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" ${force_python}
diff --git a/ansible_collections/community/docker/tests/utils/shippable/fedora.sh b/ansible_collections/community/docker/tests/utils/shippable/fedora.sh
new file mode 100755
index 00000000..157dd74e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/utils/shippable/fedora.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+pyver=default
+
+# check for explicit python version like 8.3@3.8
+declare -a splitversion
+IFS='@' read -ra splitversion <<< "$version"
+
+if [ "${#splitversion[@]}" -gt 1 ]; then
+ version="${splitversion[0]}"
+ pyver="${splitversion[1]}"
+fi
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/${args[2]}/"
+else
+ target="azp/"
+fi
+
+force_python=""
+if [[ "${version}" =~ -pypi-latest$ ]]; then
+ version="${version/-pypi-latest}"
+    echo 'force_docker_sdk_for_python_pypi: true' >> tests/integration/integration_config.yml
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8\. ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --python "${pyver}" --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" ${force_python}
diff --git a/ansible_collections/community/docker/tests/utils/shippable/linux-community.sh b/ansible_collections/community/docker/tests/utils/shippable/linux-community.sh
new file mode 100755
index 00000000..78dc10a7
--- /dev/null
+++ b/ansible_collections/community/docker/tests/utils/shippable/linux-community.sh
@@ -0,0 +1,22 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+image="${args[1]}"
+python="${args[2]}"
+
+if [ "${#args[@]}" -gt 3 ]; then
+ target="azp/${args[3]}/"
+else
+ target="azp/"
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --docker "quay.io/ansible-community/test-image:${image}" --python "${python}"
diff --git a/ansible_collections/community/docker/tests/utils/shippable/linux.sh b/ansible_collections/community/docker/tests/utils/shippable/linux.sh
new file mode 100755
index 00000000..9a5381f8
--- /dev/null
+++ b/ansible_collections/community/docker/tests/utils/shippable/linux.sh
@@ -0,0 +1,21 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+image="${args[1]}"
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/${args[2]}/"
+else
+ target="azp/"
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --docker "${image}"
diff --git a/ansible_collections/community/docker/tests/utils/shippable/remote.sh b/ansible_collections/community/docker/tests/utils/shippable/remote.sh
new file mode 100755
index 00000000..157dd74e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/utils/shippable/remote.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+pyver=default
+
+# check for explicit python version like 8.3@3.8
+declare -a splitversion
+IFS='@' read -ra splitversion <<< "$version"
+
+if [ "${#splitversion[@]}" -gt 1 ]; then
+ version="${splitversion[0]}"
+ pyver="${splitversion[1]}"
+fi
+
+if [ "${#args[@]}" -gt 2 ]; then
+ target="azp/${args[2]}/"
+else
+ target="azp/"
+fi
+
+force_python=""
+if [[ "${version}" =~ -pypi-latest$ ]]; then
+ version="${version/-pypi-latest}"
+    echo 'force_docker_sdk_for_python_pypi: true' >> tests/integration/integration_config.yml
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8\. ]]; then
+ echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+ --python "${pyver}" --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" ${force_python}
diff --git a/ansible_collections/community/docker/tests/utils/shippable/rhel.sh b/ansible_collections/community/docker/tests/utils/shippable/rhel.sh
new file mode 100755
index 00000000..157dd74e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/utils/shippable/rhel.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+pyver=default
+
+# check for explicit python version like 8.3@3.8
+declare -a splitversion
+IFS='@' read -ra splitversion <<< "$version"
+
+if [ "${#splitversion[@]}" -gt 1 ]; then
+    version="${splitversion[0]}"
+    pyver="${splitversion[1]}"
+fi
+
+if [ "${#args[@]}" -gt 2 ]; then
+    target="azp/${args[2]}/"
+else
+    target="azp/"
+fi
+
+force_python=""
+if [[ "${version}" =~ -pypi-latest$ ]]; then
+    version="${version/-pypi-latest}"
+    echo 'force_docker_sdk_for_python_pypi: true' >> tests/integration/integration_config.yml
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8\. ]]; then
+    echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+    --python "${pyver}" --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" ${force_python}
diff --git a/ansible_collections/community/docker/tests/utils/shippable/sanity.sh b/ansible_collections/community/docker/tests/utils/shippable/sanity.sh
new file mode 100755
index 00000000..04b925bb
--- /dev/null
+++ b/ansible_collections/community/docker/tests/utils/shippable/sanity.sh
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+group="${args[1]}"
+
+if [ "${BASE_BRANCH:-}" ]; then
+    base_branch="origin/${BASE_BRANCH}"
+else
+    base_branch=""
+fi
+
+if [ "${group}" == "extra" ]; then
+    ../internal_test_tools/tools/run.py --color --bot --junit
+    exit
+fi
+
+# shellcheck disable=SC2086
+ansible-test sanity --color -v --junit ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \
+    --docker ${base_branch:+--base-branch "${base_branch}"} \
+    --allow-disabled
diff --git a/ansible_collections/community/docker/tests/utils/shippable/shippable.sh b/ansible_collections/community/docker/tests/utils/shippable/shippable.sh
new file mode 100755
index 00000000..2ca96b88
--- /dev/null
+++ b/ansible_collections/community/docker/tests/utils/shippable/shippable.sh
@@ -0,0 +1,233 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+ansible_version="${args[0]}"
+script="${args[1]}"
+
+function join {
+    local IFS="$1"
+    shift
+    echo "$*"
+}
+
+# Ensure we can write other collections to this dir
+sudo chown -R "$(whoami)" "${PWD}/../../../"
+
+test="$(join / "${args[@]:1}")"
+
+docker images ansible/ansible
+docker images quay.io/ansible/*
+docker ps
+
+for container in $(docker ps --format '{{.Image}} {{.ID}}' | grep -v -e '^drydock/' -e '^quay.io/ansible/azure-pipelines-test-container:' | sed 's/^.* //'); do
+ docker rm -f "${container}" || true # ignore errors
+done
+
+docker ps
+
+if [ -d /home/shippable/cache/ ]; then
+ ls -la /home/shippable/cache/
+fi
+
+command -v python
+python -V
+
+function retry
+{
+    # shellcheck disable=SC2034
+    for repetition in 1 2 3; do
+        set +e
+        "$@"
+        result=$?
+        set -e
+        if [ ${result} == 0 ]; then
+            return ${result}
+        fi
+        echo "$* -> ${result}"
+    done
+    echo "Command '$*' failed 3 times!"
+    exit 255
+}
+
+command -v pip
+pip --version
+pip list --disable-pip-version-check
+if [ "${ansible_version}" == "devel" ]; then
+ retry pip install https://github.com/ansible/ansible/archive/devel.tar.gz --disable-pip-version-check
+else
+ retry pip install "https://github.com/ansible/ansible/archive/stable-${ansible_version}.tar.gz" --disable-pip-version-check
+fi
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then
+ export ANSIBLE_COLLECTIONS_PATHS="${HOME}/.ansible"
+ SHIPPABLE_RESULT_DIR="$(pwd)/shippable"
+ TEST_DIR="${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/docker"
+ mkdir -p "${TEST_DIR}"
+ cp -aT "${SHIPPABLE_BUILD_DIR}" "${TEST_DIR}"
+ cd "${TEST_DIR}"
+else
+ export ANSIBLE_COLLECTIONS_PATHS="${PWD}/../../../"
+fi
+
+if [ "${test}" == "sanity/extra" ]; then
+ retry pip install junit-xml --disable-pip-version-check
+fi
+
+# START: HACK
+if [ "${test}" == "sanity/extra" ]; then
+ # Nothing further should be added to this list.
+ # This is to prevent modules or plugins in this collection having a runtime dependency on other collections.
+ retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.internal_test_tools.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/internal_test_tools"
+ # NOTE: we're installing with git to work around Galaxy being a huge PITA (https://github.com/ansible/galaxy/issues/2429)
+ # retry ansible-galaxy -vvv collection install community.internal_test_tools
+fi
+
+if [ "${script}" != "sanity" ] && [ "${script}" != "units" ] && [ "${test}" != "sanity/extra" ]; then
+ # To prevent Python dependencies on other collections only install other collections for integration tests
+ retry git clone --depth=1 --single-branch https://github.com/ansible-collections/ansible.posix.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/ansible/posix"
+ retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.crypto.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/crypto"
+ retry git clone --depth=1 --single-branch https://github.com/ansible-collections/community.general.git "${ANSIBLE_COLLECTIONS_PATHS}/ansible_collections/community/general"
+ # NOTE: we're installing with git to work around Galaxy being a huge PITA (https://github.com/ansible/galaxy/issues/2429)
+ # retry ansible-galaxy -vvv collection install ansible.posix
+ # retry ansible-galaxy -vvv collection install community.crypto
+ # retry ansible-galaxy -vvv collection install community.general
+fi
+# END: HACK
+
+
+export PYTHONIOENCODING='utf-8'
+
+if [ "${JOB_TRIGGERED_BY_NAME:-}" == "nightly-trigger" ]; then
+ COVERAGE=yes
+ COMPLETE=yes
+fi
+
+if [ -n "${COVERAGE:-}" ]; then
+ # on-demand coverage reporting triggered by setting the COVERAGE environment variable to a non-empty value
+ export COVERAGE="--coverage"
+elif [[ "${COMMIT_MESSAGE}" =~ ci_coverage ]]; then
+ # on-demand coverage reporting triggered by having 'ci_coverage' in the latest commit message
+ export COVERAGE="--coverage"
+else
+ # on-demand coverage reporting disabled (default behavior, always-on coverage reporting remains enabled)
+ export COVERAGE="--coverage-check"
+fi
+
+if [ -n "${COMPLETE:-}" ]; then
+ # disable change detection triggered by setting the COMPLETE environment variable to a non-empty value
+ export CHANGED=""
+elif [[ "${COMMIT_MESSAGE}" =~ ci_complete ]]; then
+ # disable change detection triggered by having 'ci_complete' in the latest commit message
+ export CHANGED=""
+else
+ # enable change detection (default behavior)
+ export CHANGED="--changed"
+fi
+
+if [ "${IS_PULL_REQUEST:-}" == "true" ]; then
+ # run unstable tests which are targeted by focused changes on PRs
+ export UNSTABLE="--allow-unstable-changed"
+else
+ # do not run unstable tests outside PRs
+ export UNSTABLE=""
+fi
+
+# remove empty core/extras module directories from PRs created prior to the repo-merge
+find plugins -type d -empty -print -delete
+
+function cleanup
+{
+ # for complete on-demand coverage generate a report for all files with no coverage on the "sanity/5" job so we only have one copy
+ if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ] && [ "${test}" == "sanity/5" ]; then
+ stub="--stub"
+ # trigger coverage reporting for stubs even if no other coverage data exists
+ mkdir -p tests/output/coverage/
+ else
+ stub=""
+ fi
+
+ if [ -d tests/output/coverage/ ]; then
+ if find tests/output/coverage/ -mindepth 1 -name '.*' -prune -o -print -quit | grep -q .; then
+ process_coverage='yes' # process existing coverage files
+ elif [ "${stub}" ]; then
+ process_coverage='yes' # process coverage when stubs are enabled
+ else
+ process_coverage=''
+ fi
+
+ if [ "${process_coverage}" ]; then
+ # use python 3.7 for coverage to avoid running out of memory during coverage xml processing
+ # only use it for coverage to avoid the additional overhead of setting up a virtual environment for a potential no-op job
+ virtualenv --python /usr/bin/python3.7 ~/ansible-venv
+ set +ux
+ . ~/ansible-venv/bin/activate
+ set -ux
+
+ # shellcheck disable=SC2086
+ ansible-test coverage xml --color -v --requirements --group-by command --group-by version ${stub:+"$stub"}
+ cp -a tests/output/reports/coverage=*.xml "$SHIPPABLE_RESULT_DIR/codecoverage/"
+
+ if [ "${ansible_version}" != "2.9" ]; then
+ # analyze and capture code coverage aggregated by integration test target
+ ansible-test coverage analyze targets generate -v "$SHIPPABLE_RESULT_DIR/testresults/coverage-analyze-targets.json"
+ fi
+
+ # upload coverage report to codecov.io only when using complete on-demand coverage
+ if [ "${COVERAGE}" == "--coverage" ] && [ "${CHANGED}" == "" ]; then
+ for file in tests/output/reports/coverage=*.xml; do
+ flags="${file##*/coverage=}"
+ flags="${flags%-powershell.xml}"
+ flags="${flags%.xml}"
+ # remove numbered component from stub files when converting to tags
+ flags="${flags//stub-[0-9]*/stub}"
+ flags="${flags//=/,}"
+ flags="${flags//[^a-zA-Z0-9_,]/_}"
+
+ bash <(curl -s https://ansible-ci-files.s3.us-east-1.amazonaws.com/codecov/codecov.sh) \
+ -f "${file}" \
+ -F "${flags}" \
+ -n "${test}" \
+ -t 8450ed26-4e94-4d07-8831-d2023d6d20a3 \
+ -X coveragepy \
+ -X gcov \
+ -X fix \
+ -X search \
+ -X xcode \
+ || echo "Failed to upload code coverage report to codecov.io: ${file}"
+ done
+ fi
+ fi
+ fi
+
+ if [ -d tests/output/junit/ ]; then
+ cp -aT tests/output/junit/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+
+ if [ -d tests/output/data/ ]; then
+ cp -a tests/output/data/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+
+ if [ -d tests/output/bot/ ]; then
+ cp -aT tests/output/bot/ "$SHIPPABLE_RESULT_DIR/testresults/"
+ fi
+}
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then trap cleanup EXIT; fi
+
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+ timeout=60
+else
+ timeout=50
+fi
+
+ansible-test env --dump --show --timeout "${timeout}" --color -v
+
+if [ "${SHIPPABLE_BUILD_ID:-}" ]; then "tests/utils/shippable/check_matrix.py"; fi
+"tests/utils/shippable/${script}.sh" "${test}"
diff --git a/ansible_collections/community/docker/tests/utils/shippable/ubuntu.sh b/ansible_collections/community/docker/tests/utils/shippable/ubuntu.sh
new file mode 100755
index 00000000..157dd74e
--- /dev/null
+++ b/ansible_collections/community/docker/tests/utils/shippable/ubuntu.sh
@@ -0,0 +1,45 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+platform="${args[0]}"
+version="${args[1]}"
+pyver=default
+
+# check for explicit python version like 8.3@3.8
+declare -a splitversion
+IFS='@' read -ra splitversion <<< "$version"
+
+if [ "${#splitversion[@]}" -gt 1 ]; then
+    version="${splitversion[0]}"
+    pyver="${splitversion[1]}"
+fi
+
+if [ "${#args[@]}" -gt 2 ]; then
+    target="azp/${args[2]}/"
+else
+    target="azp/"
+fi
+
+force_python=""
+if [[ "${version}" =~ -pypi-latest$ ]]; then
+    version="${version/-pypi-latest}"
+    echo 'force_docker_sdk_for_python_pypi: true' >> tests/integration/integration_config.yml
+fi
+
+stage="${S:-prod}"
+provider="${P:-default}"
+
+if [ "${platform}" == "rhel" ] && [[ "${version}" =~ ^8\. ]]; then
+    echo "pynacl >= 1.4.0, < 1.5.0; python_version == '3.6'" >> tests/utils/constraints.txt
+fi
+
+# shellcheck disable=SC2086
+ansible-test integration --color -v --retry-on-error "${target}" ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} ${UNSTABLE:+"$UNSTABLE"} \
+    --python "${pyver}" --remote "${platform}/${version}" --remote-terminate always --remote-stage "${stage}" --remote-provider "${provider}" ${force_python}
diff --git a/ansible_collections/community/docker/tests/utils/shippable/units.sh b/ansible_collections/community/docker/tests/utils/shippable/units.sh
new file mode 100755
index 00000000..37685cb0
--- /dev/null
+++ b/ansible_collections/community/docker/tests/utils/shippable/units.sh
@@ -0,0 +1,29 @@
+#!/usr/bin/env bash
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+set -o pipefail -eux
+
+declare -a args
+IFS='/:' read -ra args <<< "$1"
+
+group="${args[1]}"
+
+if [[ "${COVERAGE:-}" == "--coverage" ]]; then
+ timeout=90
+else
+ timeout=30
+fi
+
+group1=()
+
+case "${group}" in
+ 1) options=("${group1[@]:+${group1[@]}}") ;;
+esac
+
+ansible-test env --timeout "${timeout}" --color -v
+
+# shellcheck disable=SC2086
+ansible-test units --color -v --docker default ${COVERAGE:+"$COVERAGE"} ${CHANGED:+"$CHANGED"} \
+ "${options[@]:+${options[@]}}" \