author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-13 12:04:41 +0000
commit    975f66f2eebe9dadba04f275774d4ab83f74cf25 (patch)
tree      89bd26a93aaae6a25749145b7e4bca4a1e75b2be /ansible_collections/community/docker/plugins
parent    Initial commit. (diff)
Adding upstream version 7.7.0+dfsg. (tag: upstream/7.7.0+dfsg)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'ansible_collections/community/docker/plugins')
-rw-r--r--  ansible_collections/community/docker/plugins/action/docker_container_copy_into.py | 40
-rw-r--r--  ansible_collections/community/docker/plugins/connection/docker.py | 452
-rw-r--r--  ansible_collections/community/docker/plugins/connection/docker_api.py | 341
-rw-r--r--  ansible_collections/community/docker/plugins/connection/nsenter.py | 239
-rw-r--r--  ansible_collections/community/docker/plugins/doc_fragments/attributes.py | 96
-rw-r--r--  ansible_collections/community/docker/plugins/doc_fragments/docker.py | 297
-rw-r--r--  ansible_collections/community/docker/plugins/inventory/docker_containers.py | 354
-rw-r--r--  ansible_collections/community/docker/plugins/inventory/docker_machine.py | 275
-rw-r--r--  ansible_collections/community/docker/plugins/inventory/docker_swarm.py | 264
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/_import_helper.py | 107
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/api/client.py | 606
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/api/daemon.py | 196
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/auth.py | 388
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/constants.py | 50
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/credentials/constants.py | 16
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/credentials/errors.py | 38
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/credentials/store.py | 119
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/credentials/utils.py | 62
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/errors.py | 223
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/tls.py | 122
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/transport/basehttpadapter.py | 20
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/transport/npipeconn.py | 113
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/transport/npipesocket.py | 259
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/transport/sshconn.py | 270
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py | 73
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/transport/unixconn.py | 114
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/types/daemon.py | 83
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/utils/build.py | 305
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/utils/config.py | 78
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/utils/decorators.py | 59
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/utils/fnmatch.py | 127
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/utils/json_stream.py | 89
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/utils/ports.py | 95
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/utils/proxy.py | 85
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py | 200
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py | 524
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/_scramble.py | 56
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/common.py | 693
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/common_api.py | 591
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/copy.py | 442
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/image_archive.py | 157
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/module_container/base.py | 1204
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py | 1353
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/module_container/module.py | 843
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/socket_handler.py | 206
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/socket_helper.py | 66
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/swarm.py | 281
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/util.py | 394
-rw-r--r--  ansible_collections/community/docker/plugins/module_utils/version.py | 24
-rw-r--r--  ansible_collections/community/docker/plugins/modules/current_container_facts.py | 145
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_compose.py | 1236
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_config.py | 434
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_container.py | 1288
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py | 870
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_container_exec.py | 307
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_container_info.py | 144
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_host_info.py | 384
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_image.py | 1033
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_image_info.py | 273
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_image_load.py | 194
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_login.py | 451
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_network.py | 679
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_network_info.py | 140
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_node.py | 306
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_node_info.py | 162
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_plugin.py | 392
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_prune.py | 275
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_secret.py | 406
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_stack.py | 309
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_stack_info.py | 88
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py | 98
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_swarm.py | 728
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_swarm_info.py | 388
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_swarm_service.py | 2866
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py | 121
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_volume.py | 312
-rw-r--r--  ansible_collections/community/docker/plugins/modules/docker_volume_info.py | 127
-rw-r--r--  ansible_collections/community/docker/plugins/plugin_utils/common.py | 41
-rw-r--r--  ansible_collections/community/docker/plugins/plugin_utils/common_api.py | 40
-rw-r--r--  ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py | 18
80 files changed, 27344 insertions, 0 deletions
diff --git a/ansible_collections/community/docker/plugins/action/docker_container_copy_into.py b/ansible_collections/community/docker/plugins/action/docker_container_copy_into.py
new file mode 100644
index 000000000..372cbd0a3
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/action/docker_container_copy_into.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import base64
+
+from ansible import constants as C
+from ansible.plugins.action import ActionBase
+from ansible.utils.vars import merge_hash
+
+from ansible_collections.community.docker.plugins.module_utils._scramble import unscramble
+
+
+class ActionModule(ActionBase):
+ # Set to True when transferring files to the remote
+ TRANSFERS_FILES = False
+
+ def run(self, tmp=None, task_vars=None):
+ self._supports_check_mode = True
+ self._supports_async = True
+
+ result = super(ActionModule, self).run(tmp, task_vars)
+ del tmp # tmp no longer has any effect
+
+ self._task.args['_max_file_size_for_diff'] = C.MAX_FILE_SIZE_FOR_DIFF
+
+ result = merge_hash(result, self._execute_module(task_vars=task_vars, wrap_async=self._task.async_val))
+
+ if u'diff' in result and result[u'diff'].get(u'scrambled_diff'):
+ # Scrambling is not done for security, but to avoid no_log screwing up the diff
+ diff = result[u'diff']
+ key = base64.b64decode(diff.pop(u'scrambled_diff'))
+ for k in (u'before', u'after'):
+ if k in diff:
+ diff[k] = unscramble(diff[k], key)
+
+ return result
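
For orientation, a minimal task that exercises this action plugin through its module might look like the following sketch. The parameter names follow the docker_container_copy_into module in this collection; the container name and paths are illustrative, and diff: true is what triggers the scrambled-diff handling above.

- name: Copy a local file into a running container
  community.docker.docker_container_copy_into:
    container: web-1               # illustrative container name
    path: /tmp/app.conf            # source file on the managed node
    container_path: /etc/app.conf  # destination inside the container
  diff: true                       # exercises the unscrambling logic above
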
diff --git a/ansible_collections/community/docker/plugins/connection/docker.py b/ansible_collections/community/docker/plugins/connection/docker.py
new file mode 100644
index 000000000..ba2249299
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/connection/docker.py
@@ -0,0 +1,452 @@
+# Based on the chroot connection plugin by Maykel Moya
+#
+# (c) 2014, Lorin Hochstein
+# (c) 2015, Leendert Brouwer (https://github.com/objectified)
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author:
+ - Lorin Hochstein (!UNKNOWN)
+ - Leendert Brouwer (!UNKNOWN)
+name: docker
+short_description: Run tasks in docker containers
+description:
+ - Run commands or put/fetch files to an existing docker container.
+ - Uses the Docker CLI to execute commands in the container. If you prefer
+ to directly connect to the Docker daemon, use the
+ R(community.docker.docker_api,ansible_collections.community.docker.docker_api_connection)
+ connection plugin.
+options:
+ remote_addr:
+ description:
+ - The name of the container you want to access.
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_docker_host
+ remote_user:
+ description:
+ - The user to execute as inside the container.
+ - If Docker is too old to allow this (< 1.7), the one set by Docker itself will be used.
+ vars:
+ - name: ansible_user
+ - name: ansible_docker_user
+ ini:
+ - section: defaults
+ key: remote_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ cli:
+ - name: user
+ keyword:
+ - name: remote_user
+ docker_extra_args:
+ description:
+ - Extra arguments to pass to the docker command line.
+ default: ''
+ vars:
+ - name: ansible_docker_extra_args
+ ini:
+ - section: docker_connection
+ key: extra_cli_args
+ container_timeout:
+ default: 10
+ description:
+ - Controls how long we can wait before reading output from the container once execution has started.
+ env:
+ - name: ANSIBLE_TIMEOUT
+ - name: ANSIBLE_DOCKER_TIMEOUT
+ version_added: 2.2.0
+ ini:
+ - key: timeout
+ section: defaults
+ - key: timeout
+ section: docker_connection
+ version_added: 2.2.0
+ vars:
+ - name: ansible_docker_timeout
+ version_added: 2.2.0
+ cli:
+ - name: timeout
+ type: integer
+'''
+
+import fcntl
+import os
+import os.path
+import subprocess
+import re
+
+from ansible.compat import selectors
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils.common.process import get_bin_path
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local docker based connections '''
+
+ transport = 'community.docker.docker'
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ # Note: docker supports running as non-root in some configurations.
+ # (For instance, setting the UNIX socket file to be readable and
+ # writable by a specific UNIX group and then putting users into that
+ # group). Therefore we don't check that the user is root when using
+ # this connection. But if the user is getting a permission denied
+ # error it probably means that docker on their system is only
+ # configured to be connected to by root and they are not running as
+ # root.
+
+ self._docker_args = []
+ self._container_user_cache = {}
+ self._version = None
+
+ # Windows uses Powershell modules
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+
+ if 'docker_command' in kwargs:
+ self.docker_cmd = kwargs['docker_command']
+ else:
+ try:
+ self.docker_cmd = get_bin_path('docker')
+ except ValueError:
+ raise AnsibleError("docker command not found in PATH")
+
+ @staticmethod
+ def _sanitize_version(version):
+ version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
+ version = re.sub(u'^v', u'', version)
+ return version
+
+ def _old_docker_version(self):
+ cmd_args = self._docker_args
+
+ old_version_subcommand = ['version']
+
+ old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
+ p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd_output, err = p.communicate()
+
+ return old_docker_cmd, to_native(cmd_output), err, p.returncode
+
+ def _new_docker_version(self):
+ # no result yet, must be newer Docker version
+ cmd_args = self._docker_args
+
+ new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]
+
+ new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
+ p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd_output, err = p.communicate()
+ return new_docker_cmd, to_native(cmd_output), err, p.returncode
+
+ def _get_docker_version(self):
+
+ cmd, cmd_output, err, returncode = self._old_docker_version()
+ if returncode == 0:
+ for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
+ if line.startswith(u'Server version:'): # old docker versions
+ return self._sanitize_version(line.split()[2])
+
+ cmd, cmd_output, err, returncode = self._new_docker_version()
+ if returncode:
+ raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))
+
+ return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))
+
+ def _get_docker_remote_user(self):
+ """ Get the default user configured in the docker container """
+ container = self.get_option('remote_addr')
+ if container in self._container_user_cache:
+ return self._container_user_cache[container]
+ p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', container],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ out, err = p.communicate()
+ out = to_text(out, errors='surrogate_or_strict')
+
+ if p.returncode != 0:
+ display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
+ self._container_user_cache[container] = None
+ return None
+
+ # The default exec user is root, unless it was changed in the Dockerfile with USER
+ user = out.strip() or u'root'
+ self._container_user_cache[container] = user
+ return user
+
+ def _build_exec_cmd(self, cmd):
+ """ Build the local docker exec command to run cmd on remote_host
+
+ If remote_user is available and is supported by the docker
+ version we are using, it will be provided to docker exec.
+ """
+
+ local_cmd = [self.docker_cmd]
+
+ if self._docker_args:
+ local_cmd += self._docker_args
+
+ local_cmd += [b'exec']
+
+ if self.remote_user is not None:
+ local_cmd += [b'-u', self.remote_user]
+
+ # -i is needed to keep stdin open which allows pipelining to work
+ local_cmd += [b'-i', self.get_option('remote_addr')] + cmd
+
+ return local_cmd
+
+ def _set_docker_args(self):
+ # TODO: this is mostly for backwards compatibility, play_context is used as fallback for older versions
+ # docker arguments
+ del self._docker_args[:]
+ extra_args = self.get_option('docker_extra_args') or getattr(self._play_context, 'docker_extra_args', '')
+ if extra_args:
+ self._docker_args += extra_args.split(' ')
+
+ def _set_conn_data(self):
+
+ ''' Initialize the connection; this cannot be done solely in __init__ since not all data is available at that point '''
+
+ self._set_docker_args()
+
+ self.remote_user = self.get_option('remote_user')
+ if self.remote_user is None and self._play_context.remote_user is not None:
+ self.remote_user = self._play_context.remote_user
+
+ # Use the configured timeout unless it is the default and the play context specifies a different one (backwards compatibility)
+ self.timeout = self.get_option('container_timeout')
+ if self.timeout == 10 and self.timeout != self._play_context.timeout:
+ self.timeout = self._play_context.timeout
+
+ @property
+ def docker_version(self):
+
+ if not self._version:
+ self._set_docker_args()
+
+ self._version = self._get_docker_version()
+ if self._version == u'dev':
+ display.warning(u'Docker version number is "dev". Will assume latest version.')
+ if self._version != u'dev' and LooseVersion(self._version) < LooseVersion(u'1.3'):
+ raise AnsibleError('docker connection type requires docker 1.3 or higher')
+ return self._version
+
+ def _get_actual_user(self):
+ if self.remote_user is not None:
+ # An explicit user is provided
+ if self.docker_version == u'dev' or LooseVersion(self.docker_version) >= LooseVersion(u'1.7'):
+ # Support for specifying the exec user was added in docker 1.7
+ return self.remote_user
+ else:
+ self.remote_user = None
+ actual_user = self._get_docker_remote_user()
+ if actual_user != self.get_option('remote_user'):
+ display.warning(u'docker {0} does not support remote_user, using container default: {1}'
+ .format(self.docker_version, actual_user or u'?'))
+ return actual_user
+ elif self._display.verbosity > 2:
+ # Since we're not setting the actual_user, look it up so we have it for logging later
+ # Only do this if display verbosity is high enough that we'll need the value
+ # This saves overhead from calling into docker when we don't need to.
+ return self._get_docker_remote_user()
+ else:
+ return None
+
+ def _connect(self, port=None):
+ """ Connect to the container. Nothing to do """
+ super(Connection, self)._connect()
+ if not self._connected:
+ self._set_conn_data()
+ actual_user = self._get_actual_user()
+ display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
+ actual_user or u'?'), host=self.get_option('remote_addr')
+ )
+ self._connected = True
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ Run a command on the docker host """
+
+ self._set_conn_data()
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
+
+ display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self.get_option('remote_addr'))
+ display.debug("opening command with Popen()")
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ p = subprocess.Popen(
+ local_cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ display.debug("done running command with Popen()")
+
+ if self.become and self.become.expect_prompt() and sudoable:
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)
+
+ become_output = b''
+ try:
+ while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
+ events = selector.select(self.timeout)
+ if not events:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
+
+ for key, event in events:
+ if key.fileobj == p.stdout:
+ chunk = p.stdout.read()
+ elif key.fileobj == p.stderr:
+ chunk = p.stderr.read()
+
+ if not chunk:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
+ become_output += chunk
+ finally:
+ selector.close()
+
+ if not self.become.check_success(become_output):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+ display.debug("getting output with communicate()")
+ stdout, stderr = p.communicate(in_data)
+ display.debug("done communicating")
+
+ display.debug("done with docker.exec_command()")
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ return ntpath.normpath(remote_path)
+ else:
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ """ Transfer a file from local to docker container """
+ self._set_conn_data()
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+
+ out_path = self._prefix_login_path(out_path)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound(
+ "file or module does not exist: %s" % to_native(in_path))
+
+ out_path = shlex_quote(out_path)
+ # Older docker doesn't have native support for copying files into
+ # running containers, so we use docker exec to implement this
+ # Although docker version 1.8 and later provide support, the
+ # owner and group of the files are always set to root
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ try:
+ p = subprocess.Popen(args, stdin=in_file, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except OSError:
+ raise AnsibleError("docker connection requires dd command in the container to put files")
+ stdout, stderr = p.communicate()
+
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
+ (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
+
+ def fetch_file(self, in_path, out_path):
+ """ Fetch a file from container to local. """
+ self._set_conn_data()
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+
+ in_path = self._prefix_login_path(in_path)
+ # out_path is the final file path, but docker takes a directory, not a
+ # file path
+ out_dir = os.path.dirname(out_path)
+
+ args = [self.docker_cmd, "cp", "%s:%s" % (self.get_option('remote_addr'), in_path), out_dir]
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+
+ p = subprocess.Popen(args, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
+ else:
+ actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
+
+ if p.returncode != 0:
+ # Older docker does not have native support for fetching files with the `cp` command
+ # If `cp` fails, try to use `dd` instead
+ args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
+ try:
+ p = subprocess.Popen(args, stdin=subprocess.PIPE,
+ stdout=out_file, stderr=subprocess.PIPE)
+ except OSError:
+ raise AnsibleError("docker connection requires dd command in the container to put files")
+ stdout, stderr = p.communicate()
+
+ if p.returncode != 0:
+ raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ # Rename if needed
+ if actual_out_path != out_path:
+ os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
+
+ def close(self):
+ """ Terminate the connection. Nothing to do for Docker"""
+ super(Connection, self).close()
+ self._connected = False
+
+ def reset(self):
+ # Clear container user cache
+ self._container_user_cache = {}
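
As a usage sketch (host and container names are illustrative), this plugin is normally selected through inventory variables; ansible_docker_extra_args maps to the docker_extra_args option and ansible_docker_timeout to container_timeout, both documented above. The --context argument is just one example of an extra Docker CLI argument.

all:
  hosts:
    web-1:                                             # must match an existing container name
      ansible_connection: community.docker.docker
      ansible_user: www-data                           # remote_user; requires docker >= 1.7
      ansible_docker_extra_args: "--context production"
      ansible_docker_timeout: 30                       # container_timeout option
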
diff --git a/ansible_collections/community/docker/plugins/connection/docker_api.py b/ansible_collections/community/docker/plugins/connection/docker_api.py
new file mode 100644
index 000000000..24c95f55a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/connection/docker_api.py
@@ -0,0 +1,341 @@
+# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author:
+ - Felix Fontein (@felixfontein)
+name: docker_api
+short_description: Run tasks in docker containers
+version_added: 1.1.0
+description:
+ - Run commands or put/fetch files to an existing docker container.
+ - Uses the L(requests library,https://pypi.org/project/requests/) to interact
+ directly with the Docker daemon instead of using the Docker CLI. Use the
+ R(community.docker.docker,ansible_collections.community.docker.docker_connection)
+ connection plugin if you want to use the Docker CLI.
+notes:
+ - Does B(not work with TCP TLS sockets)! This is caused by the inability to send C(close_notify) without closing the connection
+ with Python's C(SSLSocket)s. See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.docker.var_names
+options:
+ remote_user:
+ type: str
+ description:
+ - The user to execute as inside the container.
+ vars:
+ - name: ansible_user
+ - name: ansible_docker_user
+ ini:
+ - section: defaults
+ key: remote_user
+ env:
+ - name: ANSIBLE_REMOTE_USER
+ cli:
+ - name: user
+ keyword:
+ - name: remote_user
+ remote_addr:
+ type: str
+ description:
+ - The name of the container you want to access.
+ default: inventory_hostname
+ vars:
+ - name: inventory_hostname
+ - name: ansible_host
+ - name: ansible_docker_host
+ container_timeout:
+ default: 10
+ description:
+ - Controls how long we can wait before reading output from the container once execution has started.
+ env:
+ - name: ANSIBLE_TIMEOUT
+ - name: ANSIBLE_DOCKER_TIMEOUT
+ version_added: 2.2.0
+ ini:
+ - key: timeout
+ section: defaults
+ - key: timeout
+ section: docker_connection
+ version_added: 2.2.0
+ vars:
+ - name: ansible_docker_timeout
+ version_added: 2.2.0
+ cli:
+ - name: timeout
+ type: integer
+'''
+
+import os
+import os.path
+
+from ansible.errors import AnsibleFileNotFound, AnsibleConnectionFailure
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.copy import (
+ DockerFileCopyError,
+ DockerFileNotFound,
+ fetch_file,
+ put_file,
+)
+
+from ansible_collections.community.docker.plugins.plugin_utils.socket_handler import (
+ DockerSocketHandler,
+)
+from ansible_collections.community.docker.plugins.plugin_utils.common_api import (
+ AnsibleDockerClient,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound
+
+MIN_DOCKER_API = None
+
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local docker based connections '''
+
+ transport = 'community.docker.docker_api'
+ has_pipelining = True
+
+ def _call_client(self, callable, not_found_can_be_resource=False):
+ try:
+ return callable()
+ except NotFound as e:
+ if not_found_can_be_resource:
+ raise AnsibleConnectionFailure('Could not find container "{1}" or resource in it ({0})'.format(e, self.get_option('remote_addr')))
+ else:
+ raise AnsibleConnectionFailure('Could not find container "{1}" ({0})'.format(e, self.get_option('remote_addr')))
+ except APIError as e:
+ if e.response is not None and e.response.status_code == 409:
+ raise AnsibleConnectionFailure('The container "{1}" has been paused ({0})'.format(e, self.get_option('remote_addr')))
+ self.client.fail(
+ 'An unexpected Docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
+ )
+ except DockerException as e:
+ self.client.fail(
+ 'An unexpected Docker error occurred for container "{1}": {0}'.format(e, self.get_option('remote_addr'))
+ )
+ except RequestException as e:
+ self.client.fail(
+ 'An unexpected requests error occurred for container "{1}" when trying to talk to the Docker daemon: {0}'
+ .format(e, self.get_option('remote_addr'))
+ )
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.client = None
+ self.ids = dict()
+
+ # Windows uses Powershell modules
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+
+ self.actual_user = None
+
+ def _connect(self, port=None):
+ """ Connect to the container. Nothing to do """
+ super(Connection, self)._connect()
+ if not self._connected:
+ self.actual_user = self.get_option('remote_user')
+ display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
+ self.actual_user or u'?'), host=self.get_option('remote_addr')
+ )
+ if self.client is None:
+ self.client = AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)
+ self._connected = True
+
+ if self.actual_user is None and display.verbosity > 2:
+ # Since we're not setting the actual_user, look it up so we have it for logging later
+ # Only do this if display verbosity is high enough that we'll need the value
+ # This saves overhead from calling into docker when we don't need to
+ display.vvv(u"Trying to determine actual user")
+ result = self._call_client(lambda: self.client.get_json('/containers/{0}/json', self.get_option('remote_addr')))
+ if result.get('Config'):
+ self.actual_user = result['Config'].get('User')
+ if self.actual_user is not None:
+ display.vvv(u"Actual user is '{0}'".format(self.actual_user))
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ Run a command on the docker host """
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ command = [self._play_context.executable, '-c', to_text(cmd)]
+
+ do_become = self.become and self.become.expect_prompt() and sudoable
+
+ display.vvv(
+ u"EXEC {0}{1}{2}".format(
+ to_text(command),
+ ', with stdin ({0} bytes)'.format(len(in_data)) if in_data is not None else '',
+ ', with become prompt' if do_become else '',
+ ),
+ host=self.get_option('remote_addr')
+ )
+
+ need_stdin = in_data is not None or do_become
+
+ data = {
+ 'Container': self.get_option('remote_addr'),
+ 'User': self.get_option('remote_user') or '',
+ 'Privileged': False,
+ 'Tty': False,
+ 'AttachStdin': need_stdin,
+ 'AttachStdout': True,
+ 'AttachStderr': True,
+ 'Cmd': command,
+ }
+
+ if 'detachKeys' in self.client._general_configs:
+ data['detachKeys'] = self.client._general_configs['detachKeys']
+
+ exec_data = self._call_client(lambda: self.client.post_json_to_json('/containers/{0}/exec', self.get_option('remote_addr'), data=data))
+ exec_id = exec_data['Id']
+
+ data = {
+ 'Tty': False,
+ 'Detach': False
+ }
+ if need_stdin:
+ exec_socket = self._call_client(lambda: self.client.post_json_to_stream_socket('/exec/{0}/start', exec_id, data=data))
+ try:
+ with DockerSocketHandler(display, exec_socket, container=self.get_option('remote_addr')) as exec_socket_handler:
+ if do_become:
+ become_output = [b'']
+
+ def append_become_output(stream_id, data):
+ become_output[0] += data
+
+ exec_socket_handler.set_block_done_callback(append_become_output)
+
+ while not self.become.check_success(become_output[0]) and not self.become.check_password_prompt(become_output[0]):
+ if not exec_socket_handler.select(self.get_option('container_timeout')):
+ stdout, stderr = exec_socket_handler.consume()
+ raise AnsibleConnectionFailure('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output[0]))
+
+ if exec_socket_handler.is_eof():
+ raise AnsibleConnectionFailure('privilege output closed while waiting for password prompt:\n' + to_native(become_output[0]))
+
+ if not self.become.check_success(become_output[0]):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ exec_socket_handler.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+
+ if in_data is not None:
+ exec_socket_handler.write(in_data)
+
+ stdout, stderr = exec_socket_handler.consume()
+ finally:
+ exec_socket.close()
+ else:
+ stdout, stderr = self._call_client(lambda: self.client.post_json_to_stream(
+ '/exec/{0}/start', exec_id, stream=False, demux=True, tty=False, data=data))
+
+ result = self._call_client(lambda: self.client.get_json('/exec/{0}/json', exec_id))
+
+ return result.get('ExitCode') or 0, stdout or b'', stderr or b''
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ return ntpath.normpath(remote_path)
+ else:
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ """ Transfer a file from local to docker container """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+
+ out_path = self._prefix_login_path(out_path)
+
+ if self.actual_user not in self.ids:
+ dummy, ids, dummy = self.exec_command(b'id -u && id -g')
+ try:
+ user_id, group_id = ids.splitlines()
+ self.ids[self.actual_user] = int(user_id), int(group_id)
+ display.vvvv(
+ 'PUT: Determined uid={0} and gid={1} for user "{2}"'.format(user_id, group_id, self.actual_user),
+ host=self.get_option('remote_addr')
+ )
+ except Exception as e:
+ raise AnsibleConnectionFailure(
+ 'Error while determining user and group ID of current user in container "{1}": {0}\nGot value: {2!r}'
+ .format(e, self.get_option('remote_addr'), ids)
+ )
+
+ user_id, group_id = self.ids[self.actual_user]
+ try:
+ self._call_client(
+ lambda: put_file(
+ self.client,
+ container=self.get_option('remote_addr'),
+ in_path=in_path,
+ out_path=out_path,
+ user_id=user_id,
+ group_id=group_id,
+ user_name=self.actual_user,
+ follow_links=True,
+ ),
+ not_found_can_be_resource=True,
+ )
+ except DockerFileNotFound as exc:
+ raise AnsibleFileNotFound(to_native(exc))
+ except DockerFileCopyError as exc:
+ raise AnsibleConnectionFailure(to_native(exc))
+
+ def fetch_file(self, in_path, out_path):
+ """ Fetch a file from container to local. """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self.get_option('remote_addr'))
+
+ in_path = self._prefix_login_path(in_path)
+
+ try:
+ self._call_client(
+ lambda: fetch_file(
+ self.client,
+ container=self.get_option('remote_addr'),
+ in_path=in_path,
+ out_path=out_path,
+ follow_links=True,
+ log=lambda msg: display.vvvv(msg, host=self.get_option('remote_addr')),
+ ),
+ not_found_can_be_resource=True,
+ )
+ except DockerFileNotFound as exc:
+ raise AnsibleFileNotFound(to_native(exc))
+ except DockerFileCopyError as exc:
+ raise AnsibleConnectionFailure(to_native(exc))
+
+ def close(self):
+ """ Terminate the connection. Nothing to do for Docker"""
+ super(Connection, self).close()
+ self._connected = False
+
+ def reset(self):
+ self.ids.clear()
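
A comparable inventory sketch for this plugin (all values illustrative). ansible_docker_docker_host comes from the var_names documentation fragment referenced above, and ansible_docker_timeout maps to the container_timeout option.

all:
  hosts:
    app-1:                                             # must match an existing container name
      ansible_connection: community.docker.docker_api
      ansible_docker_docker_host: unix://var/run/docker.sock
      ansible_user: root
      ansible_docker_timeout: 30
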
diff --git a/ansible_collections/community/docker/plugins/connection/nsenter.py b/ansible_collections/community/docker/plugins/connection/nsenter.py
new file mode 100644
index 000000000..fff36afbb
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/connection/nsenter.py
@@ -0,0 +1,239 @@
+# Copyright (c) 2021 Jeff Goldschrafe <jeff@holyhandgrenade.org>
+# Based on Ansible local connection plugin by:
+# Copyright (c) 2012 Michael DeHaan <michael.dehaan@gmail.com>
+# Copyright (c) 2015, 2017 Toshio Kuratomi <tkuratomi@ansible.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+name: nsenter
+short_description: Execute tasks on the host running the controller container
+version_added: 1.9.0
+description:
+ - This connection plugin allows Ansible, running in a privileged container, to execute tasks on the container
+ host instead of in the container itself.
+ - This is useful for running Ansible in a pull model, while still keeping the Ansible control node
+ containerized.
+ - It relies on having privileged access to run C(nsenter) in the host's PID namespace, allowing it to enter the
+ namespaces of the provided PID (default PID 1, or init/systemd).
+author: Jeff Goldschrafe (@jgoldschrafe)
+options:
+ nsenter_pid:
+ description:
+ - PID to attach with using nsenter.
+ - The default should be fine unless you are attaching as a non-root user.
+ type: int
+ default: 1
+ vars:
+ - name: ansible_nsenter_pid
+ env:
+ - name: ANSIBLE_NSENTER_PID
+ ini:
+ - section: nsenter_connection
+ key: nsenter_pid
+notes:
+ - The remote user is ignored; this plugin always runs as root.
+ - >-
+ This plugin requires the Ansible controller container to be launched in the following way:
+ (1) The container image contains the C(nsenter) program;
+ (2) The container is launched in privileged mode;
+ (3) The container is launched in the host's PID namespace (C(--pid host)).
+'''
+
+import os
+import pty
+import subprocess
+import fcntl
+
+import ansible.constants as C
+from ansible.errors import AnsibleError
+from ansible.module_utils.compat import selectors
+from ansible.module_utils.six import binary_type, text_type
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+from ansible.utils.path import unfrackpath
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ '''Connections to a container host using nsenter
+ '''
+
+ transport = 'community.docker.nsenter'
+ has_pipelining = False
+
+ def __init__(self, *args, **kwargs):
+ super(Connection, self).__init__(*args, **kwargs)
+ self.cwd = None
+
+ def _connect(self):
+ self._nsenter_pid = self.get_option("nsenter_pid")
+
+ # Because nsenter requires very high privileges, our remote user
+ # is always assumed to be root.
+ self._play_context.remote_user = "root"
+
+ if not self._connected:
+ display.vvv(
+ u"ESTABLISH NSENTER CONNECTION FOR USER: {0}".format(
+ self._play_context.remote_user
+ ),
+ host=self._play_context.remote_addr,
+ )
+ self._connected = True
+ return self
+
+ def exec_command(self, cmd, in_data=None, sudoable=True):
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ display.debug("in nsenter.exec_command()")
+
+ executable = C.DEFAULT_EXECUTABLE.split()[0] if C.DEFAULT_EXECUTABLE else None
+
+ if not os.path.exists(to_bytes(executable, errors='surrogate_or_strict')):
+ raise AnsibleError("failed to find the executable specified %s."
+ " Please verify if the executable exists and re-try." % executable)
+
+ # Rewrite the provided command to prefix it with nsenter
+ nsenter_cmd_parts = [
+ "nsenter",
+ "--ipc",
+ "--mount",
+ "--net",
+ "--pid",
+ "--uts",
+ "--preserve-credentials",
+ "--target={0}".format(self._nsenter_pid),
+ "--",
+ ]
+
+ if isinstance(cmd, (text_type, binary_type)):
+ cmd_parts = nsenter_cmd_parts + [cmd]
+ cmd = to_bytes(" ".join(cmd_parts))
+ else:
+ cmd_parts = nsenter_cmd_parts + cmd
+ cmd = [to_bytes(arg) for arg in cmd_parts]
+
+ display.vvv(u"EXEC {0}".format(to_text(cmd)), host=self._play_context.remote_addr)
+ display.debug("opening command with Popen()")
+
+ master = None
+ stdin = subprocess.PIPE
+
+ # This plugin does not support pipelining. This diverges from the behavior of
+ # the core "local" connection plugin that this one derives from.
+ if sudoable and self.become and self.become.expect_prompt():
+ # Create a pty if sudoable for privilege escalation that needs it.
+ # Falls back to using a standard pipe if this fails, which may
+ # cause the command to fail in certain situations where we are escalating
+ # privileges or the command otherwise needs a pty.
+ try:
+ master, stdin = pty.openpty()
+ except (IOError, OSError) as e:
+ display.debug("Unable to open pty: %s" % to_native(e))
+
+ p = subprocess.Popen(
+ cmd,
+ shell=isinstance(cmd, (text_type, binary_type)),
+ executable=executable if isinstance(cmd, (text_type, binary_type)) else None,
+ cwd=self.cwd,
+ stdin=stdin,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+
+ # if we created a master, we can close the other half of the pty now, otherwise master is stdin
+ if master is not None:
+ os.close(stdin)
+
+ display.debug("done running command with Popen()")
+
+ if self.become and self.become.expect_prompt() and sudoable:
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)
+
+ become_output = b''
+ try:
+ while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
+ events = selector.select(self._play_context.timeout)
+ if not events:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
+
+ for key, event in events:
+ if key.fileobj == p.stdout:
+ chunk = p.stdout.read()
+ elif key.fileobj == p.stderr:
+ chunk = p.stderr.read()
+
+ if not chunk:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
+ become_output += chunk
+ finally:
+ selector.close()
+
+ if not self.become.check_success(become_output):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ if master is None:
+ p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ else:
+ os.write(master, to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+ display.debug("getting output with communicate()")
+ stdout, stderr = p.communicate(in_data)
+ display.debug("done communicating")
+
+ # finally, close the other half of the pty, if it was created
+ if master:
+ os.close(master)
+
+ display.debug("done with nsenter.exec_command()")
+ return (p.returncode, stdout, stderr)
+
+ def put_file(self, in_path, out_path):
+ super(Connection, self).put_file(in_path, out_path)
+
+ in_path = unfrackpath(in_path, basedir=self.cwd)
+ out_path = unfrackpath(out_path, basedir=self.cwd)
+
+ display.vvv(u"PUT {0} to {1}".format(in_path, out_path), host=self._play_context.remote_addr)
+ try:
+ with open(to_bytes(in_path, errors="surrogate_or_strict"), "rb") as in_file:
+ in_data = in_file.read()
+ rc, out, err = self.exec_command(cmd=["tee", out_path], in_data=in_data)
+ if rc != 0:
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, err))
+ except IOError as e:
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(out_path, to_native(e)))
+
+ def fetch_file(self, in_path, out_path):
+ super(Connection, self).fetch_file(in_path, out_path)
+
+ in_path = unfrackpath(in_path, basedir=self.cwd)
+ out_path = unfrackpath(out_path, basedir=self.cwd)
+
+ try:
+ rc, out, err = self.exec_command(cmd=["cat", in_path])
+ display.vvv(u"FETCH {0} TO {1}".format(in_path, out_path), host=self._play_context.remote_addr)
+ if rc != 0:
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(in_path, err))
+ with open(to_bytes(out_path, errors='surrogate_or_strict'), 'wb') as out_file:
+ out_file.write(out)
+ except IOError as e:
+ raise AnsibleError("failed to transfer file to {0}: {1}".format(to_native(out_path), to_native(e)))
+
+ def close(self):
+ ''' terminate the connection; nothing to do here '''
+ self._connected = False
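
A minimal sketch of how this plugin is typically used from inside a privileged controller container started with --pid host; the task shown is illustrative.

- hosts: localhost
  connection: community.docker.nsenter
  vars:
    ansible_nsenter_pid: 1          # default; enter the namespaces of init/systemd
  tasks:
    - name: Gather facts from the container host rather than the container
      ansible.builtin.setup:
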
diff --git a/ansible_collections/community/docker/plugins/doc_fragments/attributes.py b/ansible_collections/community/docker/plugins/doc_fragments/attributes.py
new file mode 100644
index 000000000..00b3319fc
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/doc_fragments/attributes.py
@@ -0,0 +1,96 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Standard documentation fragment
+ DOCUMENTATION = r'''
+options: {}
+attributes:
+ check_mode:
+ description: Can run in C(check_mode) and return changed status prediction without modifying target.
+ diff_mode:
+ description: Will return details on what has changed (or possibly needs changing in C(check_mode)), when in diff mode.
+'''
+
+ # Should be used together with the standard fragment
+ INFO_MODULE = r'''
+options: {}
+attributes:
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+'''
+
+ ACTIONGROUP_DOCKER = r'''
+options: {}
+attributes:
+ action_group:
+ description: Use C(group/docker) or C(group/community.docker.docker) in C(module_defaults) to set defaults for this module.
+ support: full
+ membership:
+ - community.docker.docker
+ - docker
+'''
+
+ CONN = r'''
+options: {}
+attributes:
+ become:
+ description: Is usable alongside C(become) keywords.
+ connection:
+ description: Uses the target's configured connection information to execute code on it.
+ delegation:
+ description: Can be used in conjunction with C(delegate_to) and related keywords.
+'''
+
+ FACTS = r'''
+options: {}
+attributes:
+ facts:
+ description: Action returns an C(ansible_facts) dictionary that will update existing host facts.
+'''
+
+ # Should be used together with the standard fragment and the FACTS fragment
+ FACTS_MODULE = r'''
+options: {}
+attributes:
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+ facts:
+ support: full
+'''
+
+ FILES = r'''
+options: {}
+attributes:
+ safe_file_operations:
+ description: Uses Ansible's strict file operation functions to ensure proper permissions and avoid data corruption.
+'''
+
+ FLOW = r'''
+options: {}
+attributes:
+ action:
+ description: Indicates this has a corresponding action plugin so some parts of the options can be executed on the controller.
+ async:
+ description: Supports being used with the C(async) keyword.
+'''
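
These fragments are consumed via extends_documentation_fragment in module DOCUMENTATION strings. Below is a sketch of how a module in this collection would combine them, assuming Ansible's usual convention that a fragment class attribute such as ACTIONGROUP_DOCKER is addressed as a lowercased suffix; the surrounding module is hypothetical.

extends_documentation_fragment:
  - community.docker.attributes
  - community.docker.attributes.actiongroup_docker
attributes:
  check_mode:
    support: full
  diff_mode:
    support: full
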
diff --git a/ansible_collections/community/docker/plugins/doc_fragments/docker.py b/ansible_collections/community/docker/plugins/doc_fragments/docker.py
new file mode 100644
index 000000000..4c537850e
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/doc_fragments/docker.py
@@ -0,0 +1,297 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Docker doc fragment
+ DOCUMENTATION = r'''
+options:
+ docker_host:
+ description:
+ - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
+ TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
+ the module will automatically replace C(tcp) in the connection URL with C(https).
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: unix://var/run/docker.sock
+ aliases: [ docker_url ]
+ tls_hostname:
+ description:
+ - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
+ be used instead. If the environment variable is not set, the default value will be used.
+ - Note that this option had a default value C(localhost) in older versions. It was removed in community.docker 3.0.0.
+ type: str
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: auto
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ ca_cert:
+ description:
+ - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description:
+ - Path to the client's TLS certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ client_key:
+ description:
+ - Path to the client's TLS key file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ssl_version:
+ description:
+ - Provide a valid SSL version number. The default value is determined by Python's ssl module.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
+ used instead.
+ type: str
+ tls:
+ description:
+ - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
+ server. Note that if I(validate_certs) is set to C(true) as well, it will take precedence.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: false
+ use_ssh_client:
+ description:
+ - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
+ - Requires Docker SDK for Python 4.4.0 or newer.
+ type: bool
+ default: false
+ version_added: 1.5.0
+ validate_certs:
+ description:
+ - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: false
+ aliases: [ tls_verify ]
+ debug:
+ description:
+ - Debug mode
+ type: bool
+ default: false
+
+notes:
+ - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
+ You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
+ C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
+ with the product that sets up the environment. It will set these variables for you. See
+ U(https://docs.docker.com/machine/reference/env/) for more details.
+ - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
+ For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
+ - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
+ In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
+ and use C($DOCKER_CONFIG/config.json) otherwise.
+'''
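
As the notes above describe, these common options can be supplied per task (or through the corresponding DOCKER_* environment variables). A sketch using the docker_host_info module from this collection; the endpoint and certificate paths are illustrative.

- name: Inspect a remote daemon over verified TLS
  community.docker.docker_host_info:
    docker_host: tcp://192.0.2.23:2376
    validate_certs: true
    ca_cert: /etc/docker/certs/ca.pem
    client_cert: /etc/docker/certs/cert.pem
    client_key: /etc/docker/certs/key.pem
    timeout: 60
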
+
+ # For plugins: allow to define common options with Ansible variables
+
+ VAR_NAMES = r'''
+options:
+ docker_host:
+ vars:
+ - name: ansible_docker_docker_host
+ tls_hostname:
+ vars:
+ - name: ansible_docker_tls_hostname
+ api_version:
+ vars:
+ - name: ansible_docker_api_version
+ timeout:
+ vars:
+ - name: ansible_docker_timeout
+ ca_cert:
+ vars:
+ - name: ansible_docker_ca_cert
+ client_cert:
+ vars:
+ - name: ansible_docker_client_cert
+ client_key:
+ vars:
+ - name: ansible_docker_client_key
+ ssl_version:
+ vars:
+ - name: ansible_docker_ssl_version
+ tls:
+ vars:
+ - name: ansible_docker_tls
+ validate_certs:
+ vars:
+ - name: ansible_docker_validate_certs
+'''
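+
+    # Illustrative (hypothetical) inventory snippet: a host can set these
+    # variables so that connection plugins extending this fragment pick up
+    # the Docker daemon configuration per host, for example:
+    #
+    #   all:
+    #     hosts:
+    #       my-container:
+    #         ansible_connection: community.docker.docker_api
+    #         ansible_docker_docker_host: tcp://192.0.2.23:2376
+    #         ansible_docker_validate_certs: true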
+
+ # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
+
+ DOCKER_PY_1_DOCUMENTATION = r'''
+options: {}
+notes:
+ - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
+ communicate with the Docker daemon.
+requirements:
+ - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+ Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+ (see L(here,https://github.com/docker/docker-py/issues/1310) for details). Note that both
+ modules should *not* be installed at the same time. Also note that when both modules are
+ installed and one of them is uninstalled, the other might no longer function and a reinstall
+ of it is required."
+'''
+
+ # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
+ # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
+
+ DOCKER_PY_2_DOCUMENTATION = r'''
+options: {}
+notes:
+ - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
+ communicate with the Docker daemon.
+requirements:
+ - "Python >= 2.7"
+ - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+ Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+ (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+ This module does *not* work with docker-py."
+'''
+
+ # Docker doc fragment when using the vendored API access code
+ API_DOCUMENTATION = r'''
+options:
+ docker_host:
+ description:
+ - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
+ TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
+ the module will automatically replace C(tcp) in the connection URL with C(https).
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: unix://var/run/docker.sock
+ aliases: [ docker_url ]
+ tls_hostname:
+ description:
+ - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
+ be used instead. If the environment variable is not set, the default value will be used.
+ - Note that this option had a default value C(localhost) in older versions. It was removed in community.docker 3.0.0.
+ type: str
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by this collection and the docker daemon.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: auto
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ ca_cert:
+ description:
+ - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description:
+ - Path to the client's TLS certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ client_key:
+ description:
+ - Path to the client's TLS key file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ssl_version:
+ description:
+      - Provide a valid SSL version number. The default value is determined by the ssl.py module.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
+ used instead.
+ type: str
+ tls:
+ description:
+ - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
+ server. Note that if I(validate_certs) is set to C(true) as well, it will take precedence.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: false
+ use_ssh_client:
+ description:
+ - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
+ type: bool
+ default: false
+ version_added: 1.5.0
+ validate_certs:
+ description:
+ - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: false
+ aliases: [ tls_verify ]
+ debug:
+ description:
+      - Whether to enable debug mode.
+ type: bool
+ default: false
+
+notes:
+ - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
+ You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
+    C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using Docker Machine, run the script shipped
+ with the product that sets up the environment. It will set these variables for you. See
+ U(https://docs.docker.com/machine/reference/env/) for more details.
+# - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
+# For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
+# - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions.
+# In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
+# and use C($DOCKER_CONFIG/config.json) otherwise.
+ - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
+    communicate with the Docker daemon. It uses code derived from the Docker SDK for Python that is included in this
+ collection.
+requirements:
+ - requests
+  - pywin32 (when using named pipes on Windows)
+ - paramiko (when using SSH with I(use_ssh_client=false))
+ - pyOpenSSL (when using TLS)
+ - backports.ssl_match_hostname (when using TLS on Python 2)
+'''
diff --git a/ansible_collections/community/docker/plugins/inventory/docker_containers.py b/ansible_collections/community/docker/plugins/inventory/docker_containers.py
new file mode 100644
index 000000000..a82cda955
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/inventory/docker_containers.py
@@ -0,0 +1,354 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Felix Fontein <felix@fontein.de>
+# For the parts taken from the docker inventory script:
+# Copyright (c) 2016, Paul Durivage <paul.durivage@gmail.com>
+# Copyright (c) 2016, Chris Houseknecht <house@redhat.com>
+# Copyright (c) 2016, James Tanner <jtanner@redhat.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+name: docker_containers
+short_description: Ansible dynamic inventory plugin for Docker containers
+version_added: 1.1.0
+author:
+ - Felix Fontein (@felixfontein)
+extends_documentation_fragment:
+ - ansible.builtin.constructed
+ - community.docker.docker.api_documentation
+description:
+ - Reads inventories from the Docker API.
+ - Uses a YAML configuration file that ends with C(docker.[yml|yaml]).
+options:
+ plugin:
+ description:
+      - The name of this plugin. It should always be set to C(community.docker.docker_containers)
+        for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices: [ community.docker.docker_containers ]
+
+ connection_type:
+ description:
+      - Which connection type to use to connect to the containers.
+      - One way to connect to containers is to use SSH (C(ssh)). For this, the options I(default_ip) and
+        I(private_ssh_port) are used. This requires that an SSH daemon is running inside the containers.
+ - Alternatively, C(docker-cli) selects the
+ R(docker connection plugin,ansible_collections.community.docker.docker_connection),
+ and C(docker-api) (default) selects the
+ R(docker_api connection plugin,ansible_collections.community.docker.docker_api_connection).
+ - When C(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin
+ to the connection plugin. This can be controlled with I(configure_docker_daemon).
+ - Note that the R(docker_api connection plugin,ansible_collections.community.docker.docker_api_connection)
+ does B(not work with TCP TLS sockets)! See U(https://github.com/ansible-collections/community.docker/issues/605)
+ for more information.
+ type: str
+ default: docker-api
+ choices:
+ - ssh
+ - docker-cli
+ - docker-api
+
+ configure_docker_daemon:
+ description:
+ - Whether to pass all Docker daemon configuration from the inventory plugin to the connection plugin.
+ - Only used when I(connection_type=docker-api).
+ type: bool
+ default: true
+ version_added: 1.8.0
+
+ verbose_output:
+ description:
+      - Toggle whether to include all available inspection metadata.
+ - Note that all top-level keys will be transformed to the format C(docker_xxx).
+ For example, C(HostConfig) is converted to C(docker_hostconfig).
+      - If this is C(false), these values can only be used in I(compose), I(groups), and I(keyed_groups).
+ - The C(docker) inventory script always added these variables, so for compatibility set this to C(true).
+ type: bool
+ default: false
+
+ default_ip:
+ description:
+ - The IP address to assign to ansible_host when the container's SSH port is mapped to interface
+ '0.0.0.0'.
+ - Only used if I(connection_type) is C(ssh).
+ type: str
+ default: 127.0.0.1
+
+ private_ssh_port:
+ description:
+ - The port containers use for SSH.
+ - Only used if I(connection_type) is C(ssh).
+ type: int
+ default: 22
+
+ add_legacy_groups:
+ description:
+ - "Add the same groups as the C(docker) inventory script does. These are the following:"
+ - "C(<container id>): contains the container of this ID."
+ - "C(<container name>): contains the container that has this name."
+ - "C(<container short id>): contains the containers that have this short ID (first 13 letters of ID)."
+ - "C(image_<image name>): contains the containers that have the image C(<image name>)."
+ - "C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>)."
+ - "C(service_<service name>): contains the containers that belong to the service C(<service name>)"
+ - "C(<docker_host>): contains the containers which belong to the Docker daemon I(docker_host).
+ Useful if you run this plugin against multiple Docker daemons."
+ - "C(running): contains all containers that are running."
+ - "C(stopped): contains all containers that are not running."
+ - If this is not set to C(true), you should use keyed groups to add the containers to groups.
+ See the examples for how to do that.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+# Minimal example using local Docker daemon
+plugin: community.docker.docker_containers
+docker_host: unix://var/run/docker.sock
+
+# Minimal example using remote Docker daemon
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2375
+
+# Example using remote Docker daemon with unverified TLS
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2376
+tls: true
+
+# Example using remote Docker daemon with verified TLS and client certificate verification
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2376
+validate_certs: true
+ca_cert: /somewhere/ca.pem
+client_key: /somewhere/key.pem
+client_cert: /somewhere/cert.pem
+
+# Example using constructed features to create groups
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2375
+strict: false
+keyed_groups:
+ # Add containers with primary network foo to a network_foo group
+ - prefix: network
+ key: 'docker_hostconfig.NetworkMode'
+ # Add Linux hosts to an os_linux group
+ - prefix: os
+ key: docker_platform
+
+# Example using SSH connection with an explicit fallback for when port 22 has not been
+# exported: use container name as ansible_ssh_host and 22 as ansible_ssh_port
+plugin: community.docker.docker_containers
+connection_type: ssh
+compose:
+ ansible_ssh_host: ansible_ssh_host | default(docker_name[1:], true)
+ ansible_ssh_port: ansible_ssh_port | default(22, true)
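+
+# Example approximating some of the legacy groups with keyed groups instead of
+# add_legacy_groups (illustrative sketch; the group prefixes are assumptions)
+plugin: community.docker.docker_containers
+strict: false
+keyed_groups:
+  # Add containers to an image_<image name> group
+  - prefix: image
+    key: 'docker_config.Image'
+  # Add containers to a stack_<stack name> group, when they belong to a stack
+  - prefix: stack
+    key: 'docker_config.Labels["com.docker.stack.namespace"]'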
+'''
+
+import re
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_native
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DOCKER_COMMON_ARGS_VARS,
+)
+from ansible_collections.community.docker.plugins.plugin_utils.common_api import (
+ AnsibleDockerClient,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException
+
+MIN_DOCKER_API = None
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using Docker daemon as source. '''
+
+ NAME = 'community.docker.docker_containers'
+
+ def _slugify(self, value):
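+        # Turn an inspect key such as 'HostConfig' into a host fact name such as 'docker_hostconfig'.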
+ return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
+
+ def _populate(self, client):
+ strict = self.get_option('strict')
+
+ ssh_port = self.get_option('private_ssh_port')
+ default_ip = self.get_option('default_ip')
+ hostname = self.get_option('docker_host')
+ verbose_output = self.get_option('verbose_output')
+ connection_type = self.get_option('connection_type')
+ add_legacy_groups = self.get_option('add_legacy_groups')
+
+ try:
+ params = {
+ 'limit': -1,
+ 'all': 1,
+ 'size': 0,
+ 'trunc_cmd': 0,
+ 'since': None,
+ 'before': None,
+ }
+ containers = client.get_json('/containers/json', params=params)
+ except APIError as exc:
+ raise AnsibleError("Error listing containers: %s" % to_native(exc))
+
+ if add_legacy_groups:
+ self.inventory.add_group('running')
+ self.inventory.add_group('stopped')
+
+ extra_facts = {}
+ if self.get_option('configure_docker_daemon'):
+ for option_name, var_name in DOCKER_COMMON_ARGS_VARS.items():
+ value = self.get_option(option_name)
+ if value is not None:
+ extra_facts[var_name] = value
+
+ for container in containers:
+ id = container.get('Id')
+ short_id = id[:13]
+
+ try:
+ name = container.get('Names', list())[0].lstrip('/')
+ full_name = name
+ except IndexError:
+ name = short_id
+ full_name = id
+
+ self.inventory.add_host(name)
+ facts = dict(
+ docker_name=name,
+ docker_short_id=short_id
+ )
+ full_facts = dict()
+
+ try:
+ inspect = client.get_json('/containers/{0}/json', id)
+ except APIError as exc:
+ raise AnsibleError("Error inspecting container %s - %s" % (name, str(exc)))
+
+ state = inspect.get('State') or dict()
+ config = inspect.get('Config') or dict()
+ labels = config.get('Labels') or dict()
+
+ running = state.get('Running')
+
+ # Add container to groups
+ image_name = config.get('Image')
+ if image_name and add_legacy_groups:
+ self.inventory.add_group('image_{0}'.format(image_name))
+ self.inventory.add_host(name, group='image_{0}'.format(image_name))
+
+ stack_name = labels.get('com.docker.stack.namespace')
+ if stack_name:
+ full_facts['docker_stack'] = stack_name
+ if add_legacy_groups:
+ self.inventory.add_group('stack_{0}'.format(stack_name))
+ self.inventory.add_host(name, group='stack_{0}'.format(stack_name))
+
+ service_name = labels.get('com.docker.swarm.service.name')
+ if service_name:
+ full_facts['docker_service'] = service_name
+ if add_legacy_groups:
+ self.inventory.add_group('service_{0}'.format(service_name))
+ self.inventory.add_host(name, group='service_{0}'.format(service_name))
+
+ if connection_type == 'ssh':
+ # Figure out ssh IP and Port
+ try:
+ # Lookup the public facing port Nat'ed to ssh port.
+ network_settings = inspect.get('NetworkSettings') or {}
+ port_settings = network_settings.get('Ports') or {}
+ port = port_settings.get('%d/tcp' % (ssh_port, ))[0]
+ except (IndexError, AttributeError, TypeError):
+ port = dict()
+
+ try:
+ ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
+ except KeyError:
+ ip = ''
+
+ facts.update(dict(
+ ansible_ssh_host=ip,
+ ansible_ssh_port=port.get('HostPort', 0),
+ ))
+ elif connection_type == 'docker-cli':
+ facts.update(dict(
+ ansible_host=full_name,
+ ansible_connection='community.docker.docker',
+ ))
+ elif connection_type == 'docker-api':
+ facts.update(dict(
+ ansible_host=full_name,
+ ansible_connection='community.docker.docker_api',
+ ))
+ facts.update(extra_facts)
+
+ full_facts.update(facts)
+ for key, value in inspect.items():
+ fact_key = self._slugify(key)
+ full_facts[fact_key] = value
+
+ if verbose_output:
+ facts.update(full_facts)
+
+ for key, value in facts.items():
+ self.inventory.set_variable(name, key, value)
+
+ # Use constructed if applicable
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), full_facts, name, strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), full_facts, name, strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), full_facts, name, strict=strict)
+
+ # We need to do this last since we also add a group called `name`.
+ # When we do this before a set_variable() call, the variables are assigned
+ # to the group, and not to the host.
+ if add_legacy_groups:
+ self.inventory.add_group(id)
+ self.inventory.add_host(name, group=id)
+ self.inventory.add_group(name)
+ self.inventory.add_host(name, group=name)
+ self.inventory.add_group(short_id)
+ self.inventory.add_host(name, group=short_id)
+ self.inventory.add_group(hostname)
+ self.inventory.add_host(name, group=hostname)
+
+ if running is True:
+ self.inventory.add_host(name, group='running')
+ else:
+ self.inventory.add_host(name, group='stopped')
+
+ def verify_file(self, path):
+ """Return the possibly of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker.yaml', 'docker.yml')))
+
+ def _create_client(self):
+ return AnsibleDockerClient(self, min_docker_api_version=MIN_DOCKER_API)
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ client = self._create_client()
+ try:
+ self._populate(client)
+ except DockerException as e:
+ raise AnsibleError(
+ 'An unexpected Docker error occurred: {0}'.format(e)
+ )
+ except RequestException as e:
+ raise AnsibleError(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(e)
+ )
diff --git a/ansible_collections/community/docker/plugins/inventory/docker_machine.py b/ansible_collections/community/docker/plugins/inventory/docker_machine.py
new file mode 100644
index 000000000..69d946100
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/inventory/docker_machine.py
@@ -0,0 +1,275 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_machine
+ author: Ximon Eighteen (@ximon18)
+ short_description: Docker Machine inventory source
+ requirements:
+ - L(Docker Machine,https://docs.docker.com/machine/)
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Get inventory hosts from Docker Machine.
+        - Uses a YAML configuration file that ends with C(docker_machine.[yml|yaml]).
+        - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key_file).
+ - The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
+
+ options:
+ plugin:
+ description: token that ensures this is a source file for the C(docker_machine) plugin.
+ required: true
+ choices: ['docker_machine', 'community.docker.docker_machine']
+ daemon_env:
+ description:
+ - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
+ - With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
+ A warning will be issued for any skipped host if the choice is C(require).
+            - With C(optional) and C(optional-silently), fetch them but do not skip hosts for which they cannot be fetched.
+ A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
+ - With C(skip), do not attempt to fetch the docker daemon connection environment variables.
+ - If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
+ type: str
+ choices:
+ - require
+ - require-silently
+ - optional
+ - optional-silently
+ - skip
+ default: require
+ running_required:
+ description:
+ - When C(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
+ type: bool
+ default: true
+ verbose_output:
+ description:
+            - When C(true), include all available node metadata (for example C(Image), C(Region), C(Size)) as a JSON object
+ named C(docker_machine_node_attributes).
+ type: bool
+ default: true
+'''
+
+EXAMPLES = '''
+# Minimal example
+plugin: community.docker.docker_machine
+
+# Example using constructed features to create a group per Docker Machine driver
+# (https://docs.docker.com/machine/drivers/), for example:
+# $ docker-machine create --driver digitalocean ... mymachine
+# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
+# {
+# ...
+# "digitalocean": {
+# "hosts": [
+# "mymachine"
+# ]
+# ...
+# }
+strict: false
+keyed_groups:
+ - separator: ''
+ key: docker_machine_node_attributes.DriverName
+
+# Example grouping hosts by Docker Machine tag
+strict: false
+keyed_groups:
+ - prefix: tag
+ key: 'dm_tags'
+
+# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
+compose:
+ ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
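+
+# Example (illustrative) creating a group per Docker Machine name, based on the
+# dm_DOCKER_MACHINE_NAME variable fetched when daemon_env is not 'skip'
+strict: false
+keyed_groups:
+  - prefix: machine
+    key: 'dm_DOCKER_MACHINE_NAME'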
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.common.text.converters import to_text
+from ansible.module_utils.common.process import get_bin_path
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.display import Display
+
+import json
+import re
+import subprocess
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using Docker machine as source. '''
+
+ NAME = 'community.docker.docker_machine'
+
+ DOCKER_MACHINE_PATH = None
+
+ def _run_command(self, args):
+ if not self.DOCKER_MACHINE_PATH:
+ try:
+ self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
+ except ValueError as e:
+ raise AnsibleError(to_native(e))
+
+ command = [self.DOCKER_MACHINE_PATH]
+ command.extend(args)
+ display.debug('Executing command {0}'.format(command))
+ try:
+ result = subprocess.check_output(command)
+ except subprocess.CalledProcessError as e:
+ display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
+ raise e
+
+ return to_text(result).strip()
+
+ def _get_docker_daemon_variables(self, machine_name):
+ '''
+ Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
+ the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
+ '''
+ try:
+ env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
+ except subprocess.CalledProcessError:
+ # This can happen when the machine is created but provisioning is incomplete
+ return []
+
+ # example output of docker-machine env --shell=sh:
+ # export DOCKER_TLS_VERIFY="1"
+ # export DOCKER_HOST="tcp://134.209.204.160:2376"
+ # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
+ # export DOCKER_MACHINE_NAME="routinator"
+ # # Run this command to configure your shell:
+ # # eval $(docker-machine env --shell=bash routinator)
+
+ # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
+ # with the same name and value but with a dm_ name prefix.
+ vars = []
+ for line in env_lines:
+ match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
+ if match:
+ env_var_name = match.group(1)
+ env_var_value = match.group(2)
+ vars.append((env_var_name, env_var_value))
+
+ return vars
+
+ def _get_machine_names(self):
+        # Filter out machines that are not in the Running state, as we probably can't perform any useful
+        # actions on them.
+ ls_command = ['ls', '-q']
+ if self.get_option('running_required'):
+ ls_command.extend(['--filter', 'state=Running'])
+
+ try:
+ ls_lines = self._run_command(ls_command)
+ except subprocess.CalledProcessError:
+ return []
+
+ return ls_lines.splitlines()
+
+ def _inspect_docker_machine_host(self, node):
+ try:
+            inspect_lines = self._run_command(['inspect', node])
+ except subprocess.CalledProcessError:
+ return None
+
+ return json.loads(inspect_lines)
+
+ def _ip_addr_docker_machine_host(self, node):
+ try:
+            ip_addr = self._run_command(['ip', node])
+ except subprocess.CalledProcessError:
+ return None
+
+ return ip_addr
+
+ def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
+ if not env_var_tuples:
+ warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
+ if daemon_env in ('require', 'require-silently'):
+ if daemon_env == 'require':
+ display.warning('{0}: host will be skipped'.format(warning_prefix))
+ return True
+ else: # 'optional', 'optional-silently'
+ if daemon_env == 'optional':
+ display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
+ return False
+
+ def _populate(self):
+ daemon_env = self.get_option('daemon_env')
+ try:
+ for self.node in self._get_machine_names():
+ self.node_attrs = self._inspect_docker_machine_host(self.node)
+ if not self.node_attrs:
+ continue
+
+ machine_name = self.node_attrs['Driver']['MachineName']
+
+ # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
+ # that could be used to set environment variables to influence a local Docker client:
+ if daemon_env == 'skip':
+ env_var_tuples = []
+ else:
+ env_var_tuples = self._get_docker_daemon_variables(machine_name)
+ if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
+ continue
+
+ # add an entry in the inventory for this host
+ self.inventory.add_host(machine_name)
+
+ # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
+                # this works around an issue seen with Google Cloud Platform where the IP address was not available
+                # via the 'inspect' subcommand but was via the 'ip' subcommand.
+ if self.node_attrs['Driver']['IPAddress']:
+ ip_addr = self.node_attrs['Driver']['IPAddress']
+ else:
+ ip_addr = self._ip_addr_docker_machine_host(self.node)
+
+ # set standard Ansible remote host connection settings to details captured from `docker-machine`
+ # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
+ self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
+ self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
+ self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
+ self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])
+
+ # set variables based on Docker Machine tags
+ tags = self.node_attrs['Driver'].get('Tags') or ''
+ self.inventory.set_variable(machine_name, 'dm_tags', tags)
+
+ # set variables based on Docker Machine env variables
+ for kv in env_var_tuples:
+ self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])
+
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)
+
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)
+
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)
+
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)
+
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
+ to_native(e), orig_exc=e)
+
+ def verify_file(self, path):
+ """Return the possibility of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker_machine.yaml', 'docker_machine.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/ansible_collections/community/docker/plugins/inventory/docker_swarm.py b/ansible_collections/community/docker/plugins/inventory/docker_swarm.py
new file mode 100644
index 000000000..ebb1da15c
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/inventory/docker_swarm.py
@@ -0,0 +1,264 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_swarm
+ author:
+ - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
+ short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
+ requirements:
+ - python >= 2.7
+ - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Reads inventories from the Docker swarm API.
+        - Uses a YAML configuration file that ends with C(docker_swarm.[yml|yaml]).
+        - "The plugin returns the following groups of swarm nodes: I(all) - all hosts; I(worker) - all worker nodes;
+          I(manager) - all manager nodes; I(leader) - the swarm leader node;
+          I(nonleaders) - all nodes except the swarm leader."
+ options:
+ plugin:
+            description: The name of this plugin. It should always be set to C(community.docker.docker_swarm)
+                for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices: [ docker_swarm, community.docker.docker_swarm ]
+ docker_host:
+ description:
+ - Socket of a Docker swarm manager node (C(tcp), C(unix)).
+ - "Use C(unix://var/run/docker.sock) to connect via local socket."
+ type: str
+ required: true
+ aliases: [ docker_url ]
+ verbose_output:
+            description: Toggle whether to include all available node metadata (for example C(Platform), C(Architecture), C(OS),
+                C(EngineVersion)).
+ type: bool
+ default: true
+ tls:
+ description: Connect using TLS without verifying the authenticity of the Docker host server.
+ type: bool
+ default: false
+ validate_certs:
+            description: Toggle whether to verify the authenticity of the Docker host server when connecting
+                using TLS.
+ type: bool
+ default: false
+ aliases: [ tls_verify ]
+ client_key:
+ description: Path to the client's TLS key file.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ca_cert:
+ description: Use a CA certificate when performing server verification by providing the path to a CA
+ certificate file.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description: Path to the client's TLS certificate file.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ tls_hostname:
+ description: When verifying the authenticity of the Docker host server, provide the expected name of
+ the server.
+ type: str
+ ssl_version:
+            description: Provide a valid SSL version number. The default value is determined by the ssl.py module.
+ type: str
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by Docker SDK for Python.
+ type: str
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
+ will be used instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ aliases: [ time_out ]
+ use_ssh_client:
+ description:
+ - For SSH transports, use the C(ssh) CLI tool instead of paramiko.
+ - Requires Docker SDK for Python 4.4.0 or newer.
+ type: bool
+ default: false
+ version_added: 1.5.0
+ include_host_uri:
+ description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
+                swarm leader in the format C(tcp://172.16.0.1:2376). This value may be used without further
+                modification as the value of the I(docker_host) option in Docker Swarm modules when connecting via the API.
+ The port always defaults to C(2376).
+ type: bool
+ default: false
+ include_host_uri_port:
+            description: Override the detected port number included in I(ansible_host_uri).
+ type: int
+'''
+
+EXAMPLES = '''
+# Minimal example using local docker
+plugin: community.docker.docker_swarm
+docker_host: unix://var/run/docker.sock
+
+# Minimal example using remote docker
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2375
+
+# Example using remote docker with unverified TLS
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2376
+tls: true
+
+# Example using remote docker with verified TLS and client certificate verification
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2376
+validate_certs: true
+ca_cert: /somewhere/ca.pem
+client_key: /somewhere/key.pem
+client_cert: /somewhere/cert.pem
+
+# Example using constructed features to create groups and set ansible_host
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2375
+strict: false
+keyed_groups:
+ # add for example x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'Description.Platform.Architecture'
+ # add for example linux hosts to an os_linux group
+ - prefix: os
+ key: 'Description.Platform.OS'
+ # create a group per node label
+  # for example a node labeled with "production" ends up in group "label_production"
+ # hint: labels containing special characters will be converted to safe names
+ - key: 'Spec.Labels'
+ prefix: label
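+
+# Example (illustrative) using include_host_uri to expose an ansible_host_uri
+# host variable that can be passed on as docker_host to Docker Swarm modules
+plugin: community.docker.docker_swarm
+docker_host: unix://var/run/docker.sock
+include_host_uri: true
+include_host_uri_port: 2376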
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils.common.text.converters import to_native
+from ansible_collections.community.docker.plugins.module_utils.common import get_connect_params
+from ansible_collections.community.docker.plugins.module_utils.util import update_tls_hostname
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.parsing.utils.addresses import parse_address
+
+try:
+ import docker
+ HAS_DOCKER = True
+except ImportError:
+ HAS_DOCKER = False
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using Docker swarm as source. '''
+
+ NAME = 'community.docker.docker_swarm'
+
+ def _fail(self, msg):
+ raise AnsibleError(msg)
+
+ def _populate(self):
+ raw_params = dict(
+ docker_host=self.get_option('docker_host'),
+ tls=self.get_option('tls'),
+ tls_verify=self.get_option('validate_certs'),
+ key_path=self.get_option('client_key'),
+ cacert_path=self.get_option('ca_cert'),
+ cert_path=self.get_option('client_cert'),
+ tls_hostname=self.get_option('tls_hostname'),
+ api_version=self.get_option('api_version'),
+ timeout=self.get_option('timeout'),
+ ssl_version=self.get_option('ssl_version'),
+ use_ssh_client=self.get_option('use_ssh_client'),
+ debug=None,
+ )
+ update_tls_hostname(raw_params)
+ connect_params = get_connect_params(raw_params, fail_function=self._fail)
+ self.client = docker.DockerClient(**connect_params)
+ self.inventory.add_group('all')
+ self.inventory.add_group('manager')
+ self.inventory.add_group('worker')
+ self.inventory.add_group('leader')
+ self.inventory.add_group('nonleaders')
+
+ if self.get_option('include_host_uri'):
+ if self.get_option('include_host_uri_port'):
+ host_uri_port = str(self.get_option('include_host_uri_port'))
+ elif self.get_option('tls') or self.get_option('validate_certs'):
+ host_uri_port = '2376'
+ else:
+ host_uri_port = '2375'
+
+ try:
+ self.nodes = self.client.nodes.list()
+ for self.node in self.nodes:
+ self.node_attrs = self.client.nodes.get(self.node.id).attrs
+ self.inventory.add_host(self.node_attrs['ID'])
+ self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
+ self.node_attrs['Status']['Addr'])
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
+ if 'ManagerStatus' in self.node_attrs:
+ if self.node_attrs['ManagerStatus'].get('Leader'):
+                        # This is a workaround for a bug in Docker where in some cases the Leader IP is 0.0.0.0
+ # Check moby/moby#35437 for details
+ swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
+ self.node_attrs['Status']['Addr']
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + swarm_leader_ip + ':' + host_uri_port)
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
+ self.inventory.add_host(self.node_attrs['ID'], group='leader')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
+ to_native(e))
+
+ def verify_file(self, path):
+ """Return the possibly of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_DOCKER:
+ raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
+ 'https://github.com/docker/docker-py.')
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/_import_helper.py b/ansible_collections/community/docker/plugins/module_utils/_api/_import_helper.py
new file mode 100644
index 000000000..0afe0b47c
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/_import_helper.py
@@ -0,0 +1,107 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import traceback
+
+from ansible.module_utils.six import PY2
+
+
+REQUESTS_IMPORT_ERROR = None
+URLLIB3_IMPORT_ERROR = None
+BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR = None
+
+
+try:
+ from requests import Session # noqa: F401, pylint: disable=unused-import
+ from requests.adapters import HTTPAdapter # noqa: F401, pylint: disable=unused-import
+ from requests.exceptions import HTTPError, InvalidSchema # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ REQUESTS_IMPORT_ERROR = traceback.format_exc()
+
+ class Session(object):
+ __attrs__ = []
+
+ class HTTPAdapter(object):
+ __attrs__ = []
+
+ class HTTPError(Exception):
+ pass
+
+ class InvalidSchema(Exception):
+ pass
+
+
+try:
+ from requests.packages import urllib3
+ from requests.packages.urllib3 import connection as urllib3_connection # pylint: disable=unused-import
+except ImportError:
+ try:
+ import urllib3
+ from urllib3 import connection as urllib3_connection # pylint: disable=unused-import
+ except ImportError:
+ URLLIB3_IMPORT_ERROR = traceback.format_exc()
+
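+        # Stand-in objects so this module can still be imported when urllib3
+        # is missing; fail_on_missing_imports() below reports the real error.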
+ class _HTTPConnectionPool(object):
+ pass
+
+ class _HTTPConnection(object):
+ pass
+
+ class FakeURLLIB3(object):
+ def __init__(self):
+ self._collections = self
+ self.poolmanager = self
+ self.connection = self
+ self.connectionpool = self
+
+ self.RecentlyUsedContainer = object()
+ self.PoolManager = object()
+ self.match_hostname = object()
+ self.HTTPConnectionPool = _HTTPConnectionPool
+
+ class FakeURLLIB3Connection(object):
+ def __init__(self):
+ self.HTTPConnection = _HTTPConnection
+
+ urllib3 = FakeURLLIB3()
+ urllib3_connection = FakeURLLIB3Connection()
+
+
+# Monkey-patching match_hostname with a version that supports
+# IP-address checking. Not necessary for Python 3.5 and above
+if PY2:
+ try:
+ from backports.ssl_match_hostname import match_hostname
+ urllib3.connection.match_hostname = match_hostname
+ except ImportError:
+ BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR = traceback.format_exc()
+
+
+def fail_on_missing_imports():
+    from .errors import MissingRequirementException
+
+    if REQUESTS_IMPORT_ERROR is not None:
+        raise MissingRequirementException(
+            'You have to install requests',
+            'requests', REQUESTS_IMPORT_ERROR)
+    if URLLIB3_IMPORT_ERROR is not None:
+        raise MissingRequirementException(
+            'You have to install urllib3',
+            'urllib3', URLLIB3_IMPORT_ERROR)
+    if BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR is not None:
+        raise MissingRequirementException(
+            'You have to install backports.ssl-match-hostname',
+            'backports.ssl-match-hostname', BACKPORTS_SSL_MATCH_HOSTNAME_IMPORT_ERROR)
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/api/client.py b/ansible_collections/community/docker/plugins/module_utils/_api/api/client.py
new file mode 100644
index 000000000..d9ec5870d
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/api/client.py
@@ -0,0 +1,606 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import logging
+import struct
+from functools import partial
+
+from ansible.module_utils.six import PY3, binary_type, iteritems, string_types, raise_from
+from ansible.module_utils.six.moves.urllib.parse import quote
+
+from .. import auth
+from .._import_helper import fail_on_missing_imports
+from .._import_helper import HTTPError as _HTTPError
+from .._import_helper import InvalidSchema as _InvalidSchema
+from .._import_helper import Session as _Session
+from ..constants import (DEFAULT_NUM_POOLS, DEFAULT_NUM_POOLS_SSH,
+ DEFAULT_MAX_POOL_SIZE, DEFAULT_TIMEOUT_SECONDS,
+ DEFAULT_USER_AGENT, IS_WINDOWS_PLATFORM,
+ MINIMUM_DOCKER_API_VERSION, STREAM_HEADER_SIZE_BYTES,
+ DEFAULT_DATA_CHUNK_SIZE)
+from ..errors import (DockerException, InvalidVersion, TLSParameterError, MissingRequirementException,
+ create_api_error_from_http_exception)
+from ..tls import TLSConfig
+from ..transport.npipeconn import NpipeHTTPAdapter
+from ..transport.npipesocket import PYWIN32_IMPORT_ERROR
+from ..transport.unixconn import UnixHTTPAdapter
+from ..transport.sshconn import SSHHTTPAdapter, PARAMIKO_IMPORT_ERROR
+from ..transport.ssladapter import SSLHTTPAdapter
+from ..utils import config, utils, json_stream
+from ..utils.decorators import check_resource, update_headers
+from ..utils.proxy import ProxyConfig
+from ..utils.socket import consume_socket_output, demux_adaptor, frames_iter
+
+from .daemon import DaemonApiMixin
+
+
+log = logging.getLogger(__name__)
+
+
+class APIClient(
+ _Session,
+ DaemonApiMixin):
+ """
+ A low-level client for the Docker Engine API.
+
+ Example:
+
+ >>> import docker
+ >>> client = docker.APIClient(base_url='unix://var/run/docker.sock')
+ >>> client.version()
+ {u'ApiVersion': u'1.33',
+ u'Arch': u'amd64',
+ u'BuildTime': u'2017-11-19T18:46:37.000000000+00:00',
+ u'GitCommit': u'f4ffd2511c',
+ u'GoVersion': u'go1.9.2',
+ u'KernelVersion': u'4.14.3-1-ARCH',
+ u'MinAPIVersion': u'1.12',
+ u'Os': u'linux',
+ u'Version': u'17.10.0-ce'}
+
+ Args:
+ base_url (str): URL to the Docker server. For example,
+ ``unix:///var/run/docker.sock`` or ``tcp://127.0.0.1:1234``.
+ version (str): The version of the API to use. Set to ``auto`` to
+ automatically detect the server's version. Default: ``1.35``
+ timeout (int): Default timeout for API calls, in seconds.
+ tls (bool or :py:class:`~docker.tls.TLSConfig`): Enable TLS. Pass
+ ``True`` to enable it with default options, or pass a
+ :py:class:`~docker.tls.TLSConfig` object to use custom
+ configuration.
+ user_agent (str): Set a custom user agent for requests to the server.
+ credstore_env (dict): Override environment variables when calling the
+ credential store process.
+ use_ssh_client (bool): If set to `True`, an ssh connection is made
+ via shelling out to the ssh client. Ensure the ssh client is
+ installed and configured on the host.
+ max_pool_size (int): The maximum number of connections
+ to save in the pool.
+ """
+
+ __attrs__ = _Session.__attrs__ + ['_auth_configs',
+ '_general_configs',
+ '_version',
+ 'base_url',
+ 'timeout']
+
+ def __init__(self, base_url=None, version=None,
+ timeout=DEFAULT_TIMEOUT_SECONDS, tls=False,
+ user_agent=DEFAULT_USER_AGENT, num_pools=None,
+ credstore_env=None, use_ssh_client=False,
+ max_pool_size=DEFAULT_MAX_POOL_SIZE):
+ super(APIClient, self).__init__()
+
+ fail_on_missing_imports()
+
+ if tls and not base_url:
+ raise TLSParameterError(
+ 'If using TLS, the base_url argument must be provided.'
+ )
+
+ self.base_url = base_url
+ self.timeout = timeout
+ self.headers['User-Agent'] = user_agent
+
+ self._general_configs = config.load_general_config()
+
+ proxy_config = self._general_configs.get('proxies', {})
+ try:
+ proxies = proxy_config[base_url]
+ except KeyError:
+ proxies = proxy_config.get('default', {})
+
+ self._proxy_configs = ProxyConfig.from_dict(proxies)
+
+ self._auth_configs = auth.load_config(
+ config_dict=self._general_configs, credstore_env=credstore_env,
+ )
+ self.credstore_env = credstore_env
+
+ base_url = utils.parse_host(
+ base_url, IS_WINDOWS_PLATFORM, tls=bool(tls)
+ )
+        # SSH has a different default for num_pools than all other adapters.
+        # Only apply a default when the caller did not specify num_pools.
+        if num_pools is None:
+            num_pools = DEFAULT_NUM_POOLS_SSH if base_url.startswith('ssh://') else DEFAULT_NUM_POOLS
+
+ if base_url.startswith('http+unix://'):
+ self._custom_adapter = UnixHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ # host part of URL should be unused, but is resolved by requests
+ # module in proxy_bypass_macosx_sysconf()
+ self.base_url = 'http+docker://localhost'
+ elif base_url.startswith('npipe://'):
+ if not IS_WINDOWS_PLATFORM:
+ raise DockerException(
+ 'The npipe:// protocol is only supported on Windows'
+ )
+ if PYWIN32_IMPORT_ERROR is not None:
+ raise MissingRequirementException(
+ 'Install pypiwin32 package to enable npipe:// support',
+ 'pywin32',
+ PYWIN32_IMPORT_ERROR)
+ self._custom_adapter = NpipeHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size
+ )
+ self.mount('http+docker://', self._custom_adapter)
+ self.base_url = 'http+docker://localnpipe'
+ elif base_url.startswith('ssh://'):
+ if PARAMIKO_IMPORT_ERROR is not None and not use_ssh_client:
+ raise MissingRequirementException(
+ 'Install paramiko package to enable ssh:// support',
+ 'paramiko',
+ PARAMIKO_IMPORT_ERROR)
+ self._custom_adapter = SSHHTTPAdapter(
+ base_url, timeout, pool_connections=num_pools,
+ max_pool_size=max_pool_size, shell_out=use_ssh_client
+ )
+ self.mount('http+docker://ssh', self._custom_adapter)
+ self._unmount('http://', 'https://')
+ self.base_url = 'http+docker://ssh'
+ else:
+ # Use SSLAdapter for the ability to specify SSL version
+ if isinstance(tls, TLSConfig):
+ tls.configure_client(self)
+ elif tls:
+ self._custom_adapter = SSLHTTPAdapter(
+ pool_connections=num_pools)
+ self.mount('https://', self._custom_adapter)
+ self.base_url = base_url
+
+ # version detection needs to be after unix adapter mounting
+ if version is None or (isinstance(version, string_types) and version.lower() == 'auto'):
+ self._version = self._retrieve_server_version()
+ else:
+ self._version = version
+ if not isinstance(self._version, string_types):
+ raise DockerException(
+ 'Version parameter must be a string or None. Found {0}'.format(
+ type(version).__name__
+ )
+ )
+ if utils.version_lt(self._version, MINIMUM_DOCKER_API_VERSION):
+ raise InvalidVersion(
+ 'API versions below {0} are no longer supported by this '
+ 'library.'.format(MINIMUM_DOCKER_API_VERSION)
+ )
+
+ def _retrieve_server_version(self):
+ try:
+ return self.version(api_version=False)["ApiVersion"]
+ except KeyError:
+ raise DockerException(
+ 'Invalid response from docker daemon: key "ApiVersion"'
+ ' is missing.'
+ )
+ except Exception as e:
+ raise DockerException(
+ 'Error while fetching server API version: {0}'.format(e)
+ )
+
+ def _set_request_timeout(self, kwargs):
+ """Prepare the kwargs for an HTTP request by inserting the timeout
+ parameter, if not already present."""
+ kwargs.setdefault('timeout', self.timeout)
+ return kwargs
+
+ @update_headers
+ def _post(self, url, **kwargs):
+ return self.post(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _get(self, url, **kwargs):
+ return self.get(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _head(self, url, **kwargs):
+ return self.head(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _put(self, url, **kwargs):
+ return self.put(url, **self._set_request_timeout(kwargs))
+
+ @update_headers
+ def _delete(self, url, **kwargs):
+ return self.delete(url, **self._set_request_timeout(kwargs))
+
+ def _url(self, pathfmt, *args, **kwargs):
+ for arg in args:
+ if not isinstance(arg, string_types):
+ raise ValueError(
+ 'Expected a string but found {0} ({1}) '
+ 'instead'.format(arg, type(arg))
+ )
+
+ quote_f = partial(quote, safe="/:")
+ args = map(quote_f, args)
+
+ if kwargs.get('versioned_api', True):
+ return '{0}/v{1}{2}'.format(
+ self.base_url, self._version, pathfmt.format(*args)
+ )
+ else:
+ return '{0}{1}'.format(self.base_url, pathfmt.format(*args))
+
+ def _raise_for_status(self, response):
+ """Raises stored :class:`APIError`, if one occurred."""
+ try:
+ response.raise_for_status()
+ except _HTTPError as e:
+ raise_from(create_api_error_from_http_exception(e), e)
+
+ def _result(self, response, json=False, binary=False):
+ if json and binary:
+ raise AssertionError('json and binary must not be both True')
+ self._raise_for_status(response)
+
+ if json:
+ return response.json()
+ if binary:
+ return response.content
+ return response.text
+
+ def _post_json(self, url, data, **kwargs):
+ # Go <1.1 can't unserialize null to a string
+ # so we do this disgusting thing here.
+ data2 = {}
+ if data is not None and isinstance(data, dict):
+ for k, v in iteritems(data):
+ if v is not None:
+ data2[k] = v
+ elif data is not None:
+ data2 = data
+
+ if 'headers' not in kwargs:
+ kwargs['headers'] = {}
+ kwargs['headers']['Content-Type'] = 'application/json'
+ return self._post(url, data=json.dumps(data2), **kwargs)
+
+ def _attach_params(self, override=None):
+ return override or {
+ 'stdout': 1,
+ 'stderr': 1,
+ 'stream': 1
+ }
+
+ def _get_raw_response_socket(self, response):
+ self._raise_for_status(response)
+ if self.base_url == "http+docker://localnpipe":
+ sock = response.raw._fp.fp.raw.sock
+ elif self.base_url.startswith('http+docker://ssh'):
+ sock = response.raw._fp.fp.channel
+ elif PY3:
+ sock = response.raw._fp.fp.raw
+ if self.base_url.startswith("https://"):
+ sock = sock._sock
+ else:
+ sock = response.raw._fp.fp._sock
+ try:
+ # Keep a reference to the response to stop it being garbage
+ # collected. If the response is garbage collected, it will
+ # close TLS sockets.
+ sock._response = response
+ except AttributeError:
+ # UNIX sockets can't have attributes set on them, but that's
+ # fine because we won't be doing TLS over them
+ pass
+
+ return sock
+
+ def _stream_helper(self, response, decode=False):
+ """Generator for data coming from a chunked-encoded HTTP response."""
+
+ if response.raw._fp.chunked:
+ if decode:
+ for chunk in json_stream.json_stream(self._stream_helper(response, False)):
+ yield chunk
+ else:
+ reader = response.raw
+ while not reader.closed:
+ # this read call will block until we get a chunk
+ data = reader.read(1)
+ if not data:
+ break
+ if reader._fp.chunk_left:
+ data += reader.read(reader._fp.chunk_left)
+ yield data
+ else:
+ # Response isn't chunked, meaning we probably
+ # encountered an error immediately
+ yield self._result(response, json=decode)
+
+ def _multiplexed_buffer_helper(self, response):
+ """A generator of multiplexed data blocks read from a buffered
+ response."""
+ buf = self._result(response, binary=True)
+ buf_length = len(buf)
+ walker = 0
+ while True:
+ if buf_length - walker < STREAM_HEADER_SIZE_BYTES:
+ break
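+            # Each frame starts with an 8-byte header ('>BxxxL'):
+            # byte 0 is the stream type (0=stdin, 1=stdout, 2=stderr),
+            # bytes 1-3 are padding, and bytes 4-7 hold the payload
+            # length as a big-endian unsigned 32-bit integer.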
+ header = buf[walker:walker + STREAM_HEADER_SIZE_BYTES]
+ dummy, length = struct.unpack_from('>BxxxL', header)
+ start = walker + STREAM_HEADER_SIZE_BYTES
+ end = start + length
+ walker = end
+ yield buf[start:end]
+
+ def _multiplexed_response_stream_helper(self, response):
+ """A generator of multiplexed data blocks coming from a response
+ stream."""
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ while True:
+ header = response.raw.read(STREAM_HEADER_SIZE_BYTES)
+ if not header:
+ break
+ dummy, length = struct.unpack('>BxxxL', header)
+ if not length:
+ continue
+ data = response.raw.read(length)
+ if not data:
+ break
+ yield data
+
+ def _stream_raw_result(self, response, chunk_size=1, decode=True):
+        """Stream result for TTY-enabled container and raw binary data."""
+ self._raise_for_status(response)
+
+ # Disable timeout on the underlying socket to prevent
+ # Read timed out(s) for long running processes
+ socket = self._get_raw_response_socket(response)
+ self._disable_socket_timeout(socket)
+
+ for out in response.iter_content(chunk_size, decode):
+ yield out
+
+ def _read_from_socket(self, response, stream, tty=True, demux=False):
+ """Consume all data from the socket, close the response and return the
+ data. If stream=True, then a generator is returned instead and the
+ caller is responsible for closing the response.
+ """
+ socket = self._get_raw_response_socket(response)
+
+ gen = frames_iter(socket, tty)
+
+ if demux:
+ # The generator will output tuples (stdout, stderr)
+ gen = (demux_adaptor(*frame) for frame in gen)
+ else:
+ # The generator will output strings
+ gen = (data for (dummy, data) in gen)
+
+ if stream:
+ return gen
+ else:
+ try:
+ # Wait for all the frames, concatenate them, and return the result
+ return consume_socket_output(gen, demux=demux)
+ finally:
+ response.close()
+
+ def _disable_socket_timeout(self, socket):
+ """ Depending on the combination of python version and whether we're
+ connecting over http or https, we might need to access _sock, which
+        may or may not exist; or we may need to call settimeout on the socket
+        itself, which also may or may not have settimeout on it. To avoid
+ missing the correct one, we try both.
+
+ We also do not want to set the timeout if it is already disabled, as
+ you run the risk of changing a socket that was non-blocking to
+ blocking, for example when using gevent.
+ """
+ sockets = [socket, getattr(socket, '_sock', None)]
+
+ for s in sockets:
+ if not hasattr(s, 'settimeout'):
+ continue
+
+ timeout = -1
+
+ if hasattr(s, 'gettimeout'):
+ timeout = s.gettimeout()
+
+ # Don't change the timeout if it is already disabled.
+ if timeout is None or timeout == 0.0:
+ continue
+
+ s.settimeout(None)
+
+ @check_resource('container')
+ def _check_is_tty(self, container):
+ cont = self.inspect_container(container)
+ return cont['Config']['Tty']
+
+ def _get_result(self, container, stream, res):
+ return self._get_result_tty(stream, res, self._check_is_tty(container))
+
+ def _get_result_tty(self, stream, res, is_tty):
+ # We should also use raw streaming (without keep-alives)
+ # if we're dealing with a tty-enabled container.
+ if is_tty:
+ return self._stream_raw_result(res) if stream else \
+ self._result(res, binary=True)
+
+ self._raise_for_status(res)
+ sep = binary_type()
+ if stream:
+ return self._multiplexed_response_stream_helper(res)
+ else:
+ return sep.join(
+ list(self._multiplexed_buffer_helper(res))
+ )
+
+ def _unmount(self, *args):
+ for proto in args:
+ self.adapters.pop(proto)
+
+ def get_adapter(self, url):
+ try:
+ return super(APIClient, self).get_adapter(url)
+ except _InvalidSchema as e:
+ if self._custom_adapter:
+ return self._custom_adapter
+ else:
+ raise e
+
+ @property
+ def api_version(self):
+ return self._version
+
+ def reload_config(self, dockercfg_path=None):
+ """
+ Force a reload of the auth configuration
+
+ Args:
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+ otherwise ``$HOME/.dockercfg``)
+
+ Returns:
+ None
+ """
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
+
+ def _set_auth_headers(self, headers):
+ log.debug('Looking for auth config')
+
+ # If we don't have any auth data so far, try reloading the config
+ # file one more time in case anything showed up in there.
+ if not self._auth_configs or self._auth_configs.is_empty:
+ log.debug("No auth config in memory - loading from filesystem")
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
+
+ # Send the full auth configuration (if any exists), since the build
+ # could use any (or all) of the registries.
+ if self._auth_configs:
+ auth_data = self._auth_configs.get_all_credentials()
+
+ # See https://github.com/docker/docker-py/issues/1683
+ if (auth.INDEX_URL not in auth_data and
+ auth.INDEX_NAME in auth_data):
+ auth_data[auth.INDEX_URL] = auth_data.get(auth.INDEX_NAME, {})
+
+ log.debug(
+ 'Sending auth config (%s)',
+ ', '.join(repr(k) for k in auth_data.keys())
+ )
+
+ if auth_data:
+ headers['X-Registry-Config'] = auth.encode_header(
+ auth_data
+ )
+ else:
+ log.debug('No auth config found')
+
+ def get_binary(self, pathfmt, *args, **kwargs):
+ return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs), binary=True)
+
+ def get_json(self, pathfmt, *args, **kwargs):
+ return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
+
+ def get_text(self, pathfmt, *args, **kwargs):
+ return self._result(self._get(self._url(pathfmt, *args, versioned_api=True), **kwargs))
+
+ def get_raw_stream(self, pathfmt, *args, **kwargs):
+ chunk_size = kwargs.pop('chunk_size', DEFAULT_DATA_CHUNK_SIZE)
+ res = self._get(self._url(pathfmt, *args, versioned_api=True), stream=True, **kwargs)
+ self._raise_for_status(res)
+ return self._stream_raw_result(res, chunk_size, False)
+
+ def delete_call(self, pathfmt, *args, **kwargs):
+ self._raise_for_status(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs))
+
+ def delete_json(self, pathfmt, *args, **kwargs):
+ return self._result(self._delete(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
+
+ def post_call(self, pathfmt, *args, **kwargs):
+ self._raise_for_status(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs))
+
+ def post_json(self, pathfmt, *args, **kwargs):
+ data = kwargs.pop('data', None)
+ self._raise_for_status(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs))
+
+ def post_json_to_binary(self, pathfmt, *args, **kwargs):
+ data = kwargs.pop('data', None)
+ return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs), binary=True)
+
+ def post_json_to_json(self, pathfmt, *args, **kwargs):
+ data = kwargs.pop('data', None)
+ return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs), json=True)
+
+    def post_json_to_text(self, pathfmt, *args, **kwargs):
+        data = kwargs.pop('data', None)
+        # Assumed completion, following the pattern of the sibling
+        # helpers above: return the response body as text.
+        return self._result(self._post_json(self._url(pathfmt, *args, versioned_api=True), data, **kwargs))
+
+ def post_json_to_stream_socket(self, pathfmt, *args, **kwargs):
+ data = kwargs.pop('data', None)
+ headers = (kwargs.pop('headers', None) or {}).copy()
+ headers.update({
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp',
+ })
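+        # The Upgrade headers ask the daemon to hijack the HTTP
+        # connection and hand back the raw stream, as used by attach
+        # and exec.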
+ return self._get_raw_response_socket(
+ self._post_json(self._url(pathfmt, *args, versioned_api=True), data, headers=headers, stream=True, **kwargs))
+
+ def post_json_to_stream(self, pathfmt, *args, **kwargs):
+ data = kwargs.pop('data', None)
+ headers = (kwargs.pop('headers', None) or {}).copy()
+ headers.update({
+ 'Connection': 'Upgrade',
+ 'Upgrade': 'tcp',
+ })
+ stream = kwargs.pop('stream', False)
+ demux = kwargs.pop('demux', False)
+ tty = kwargs.pop('tty', False)
+ return self._read_from_socket(
+ self._post_json(self._url(pathfmt, *args, versioned_api=True), data, headers=headers, stream=True, **kwargs),
+ stream,
+ tty=tty,
+ demux=demux
+ )
+
+ def post_to_json(self, pathfmt, *args, **kwargs):
+ return self._result(self._post(self._url(pathfmt, *args, versioned_api=True), **kwargs), json=True)
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/api/daemon.py b/ansible_collections/community/docker/plugins/module_utils/_api/api/daemon.py
new file mode 100644
index 000000000..9e7adbf3f
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/api/daemon.py
@@ -0,0 +1,196 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+from datetime import datetime
+
+from .. import auth
+from ..utils.utils import datetime_to_timestamp, convert_filters
+from ..utils.decorators import minimum_version
+from ..types.daemon import CancellableStream
+
+
+class DaemonApiMixin(object):
+ @minimum_version('1.25')
+ def df(self):
+ """
+ Get data usage information.
+
+ Returns:
+ (dict): A dictionary representing different resource categories
+ and their respective data usage.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url('/system/df')
+ return self._result(self._get(url), True)
+
+ def events(self, since=None, until=None, filters=None, decode=None):
+ """
+ Get real-time events from the server. Similar to the ``docker events``
+ command.
+
+ Args:
+ since (UTC datetime or int): Get events from this point
+ until (UTC datetime or int): Get events until this point
+ filters (dict): Filter the events by event time, container or image
+ decode (bool): If set to true, stream will be decoded into dicts on
+ the fly. False by default.
+
+ Returns:
+ A :py:class:`docker.types.daemon.CancellableStream` generator
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+
+ Example:
+
+        >>> for event in client.events(decode=True):
+ ... print(event)
+ {u'from': u'image/with:tag',
+ u'id': u'container-id',
+ u'status': u'start',
+ u'time': 1423339459}
+ ...
+
+ or
+
+ >>> events = client.events()
+ >>> for event in events:
+ ... print(event)
+ >>> # and cancel from another thread
+ >>> events.close()
+ """
+
+ if isinstance(since, datetime):
+ since = datetime_to_timestamp(since)
+
+ if isinstance(until, datetime):
+ until = datetime_to_timestamp(until)
+
+ if filters:
+ filters = convert_filters(filters)
+
+ params = {
+ 'since': since,
+ 'until': until,
+ 'filters': filters
+ }
+ url = self._url('/events')
+
+ response = self._get(url, params=params, stream=True, timeout=None)
+ stream = self._stream_helper(response, decode=decode)
+
+ return CancellableStream(stream, response)
+
+ def info(self):
+ """
+ Display system-wide information. Identical to the ``docker info``
+ command.
+
+ Returns:
+ (dict): The info as a dict
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(self._get(self._url("/info")), True)
+
+ def login(self, username, password=None, email=None, registry=None,
+ reauth=False, dockercfg_path=None):
+ """
+ Authenticate with a registry. Similar to the ``docker login`` command.
+
+ Args:
+ username (str): The registry username
+ password (str): The plaintext password
+ email (str): The email for the registry account
+ registry (str): URL to the registry. E.g.
+ ``https://index.docker.io/v1/``
+ reauth (bool): Whether or not to refresh existing authentication on
+ the Docker server.
+ dockercfg_path (str): Use a custom path for the Docker config file
+ (default ``$HOME/.docker/config.json`` if present,
+ otherwise ``$HOME/.dockercfg``)
+
+ Returns:
+ (dict): The response from the login request
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+
+ # If we don't have any auth data so far, try reloading the config file
+ # one more time in case anything showed up in there.
+ # If dockercfg_path is passed check to see if the config file exists,
+ # if so load that config.
+ if dockercfg_path and os.path.exists(dockercfg_path):
+ self._auth_configs = auth.load_config(
+ dockercfg_path, credstore_env=self.credstore_env
+ )
+ elif not self._auth_configs or self._auth_configs.is_empty:
+ self._auth_configs = auth.load_config(
+ credstore_env=self.credstore_env
+ )
+
+ authcfg = self._auth_configs.resolve_authconfig(registry)
+ # If we found an existing auth config for this registry and username
+ # combination, we can return it immediately unless reauth is requested.
+ if authcfg and authcfg.get('username', None) == username \
+ and not reauth:
+ return authcfg
+
+ req_data = {
+ 'username': username,
+ 'password': password,
+ 'email': email,
+ 'serveraddress': registry,
+ }
+
+ response = self._post_json(self._url('/auth'), data=req_data)
+ if response.status_code == 200:
+ self._auth_configs.add_auth(registry or auth.INDEX_NAME, req_data)
+ return self._result(response, json=True)
+
+ def ping(self):
+ """
+        Checks that the server is responsive. An exception will be raised
+        if it is not responding.
+
+ Returns:
+ (bool) The response from the server.
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ return self._result(self._get(self._url('/_ping'))) == 'OK'
+
+ def version(self, api_version=True):
+ """
+ Returns version information from the server. Similar to the ``docker
+ version`` command.
+
+ Returns:
+ (dict): The server version information
+
+ Raises:
+ :py:class:`docker.errors.APIError`
+ If the server returns an error.
+ """
+ url = self._url("/version", versioned_api=api_version)
+ return self._result(self._get(url), json=True)
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/auth.py b/ansible_collections/community/docker/plugins/module_utils/_api/auth.py
new file mode 100644
index 000000000..a172ced55
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/auth.py
@@ -0,0 +1,388 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import json
+import logging
+
+from ansible.module_utils.six import iteritems, string_types
+
+from . import errors
+from .credentials.store import Store
+from .credentials.errors import StoreError, CredentialsNotFound
+from .utils import config
+
+INDEX_NAME = 'docker.io'
+INDEX_URL = 'https://index.{0}/v1/'.format(INDEX_NAME)
+TOKEN_USERNAME = '<token>'
+
+log = logging.getLogger(__name__)
+
+
+def resolve_repository_name(repo_name):
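+    # For example: 'ubuntu' resolves to ('docker.io', 'ubuntu'), while
+    # 'registry.example.com:5000/foo/bar' (an illustrative registry)
+    # resolves to ('registry.example.com:5000', 'foo/bar').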
+ if '://' in repo_name:
+ raise errors.InvalidRepository(
+ 'Repository name cannot contain a scheme ({0})'.format(repo_name)
+ )
+
+ index_name, remote_name = split_repo_name(repo_name)
+ if index_name[0] == '-' or index_name[-1] == '-':
+ raise errors.InvalidRepository(
+ 'Invalid index name ({0}). Cannot begin or end with a'
+ ' hyphen.'.format(index_name)
+ )
+ return resolve_index_name(index_name), remote_name
+
+
+def resolve_index_name(index_name):
+ index_name = convert_to_hostname(index_name)
+ if index_name == 'index.' + INDEX_NAME:
+ index_name = INDEX_NAME
+ return index_name
+
+
+def get_config_header(client, registry):
+ log.debug('Looking for auth config')
+ if not client._auth_configs or client._auth_configs.is_empty:
+ log.debug(
+ "No auth config in memory - loading from filesystem"
+ )
+ client._auth_configs = load_config(credstore_env=client.credstore_env)
+ authcfg = resolve_authconfig(
+ client._auth_configs, registry, credstore_env=client.credstore_env
+ )
+ # Do not fail here if no authentication exists for this
+ # specific registry as we can have a readonly pull. Just
+ # put the header if we can.
+ if authcfg:
+ log.debug('Found auth config')
+ # auth_config needs to be a dict in the format used by
+ # auth.py username , password, serveraddress, email
+ return encode_header(authcfg)
+ log.debug('No auth config found')
+ return None
+
+
+def split_repo_name(repo_name):
+ parts = repo_name.split('/', 1)
+ if len(parts) == 1 or (
+ '.' not in parts[0] and ':' not in parts[0] and parts[0] != 'localhost'
+ ):
+ # This is a docker index repo (ex: username/foobar or ubuntu)
+ return INDEX_NAME, repo_name
+ return tuple(parts)
+
+
+def get_credential_store(authconfig, registry):
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig)
+ return authconfig.get_credential_store(registry)
+
+
+class AuthConfig(dict):
+ def __init__(self, dct, credstore_env=None):
+ if 'auths' not in dct:
+ dct['auths'] = {}
+ self.update(dct)
+ self._credstore_env = credstore_env
+ self._stores = {}
+
+ @classmethod
+ def parse_auth(cls, entries, raise_on_error=False):
+ """
+ Parses authentication entries
+
+ Args:
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
+
+ Returns:
+ Authentication registry.
+ """
+
+ conf = {}
+ for registry, entry in iteritems(entries):
+ if not isinstance(entry, dict):
+ log.debug('Config entry for key %s is not auth config', registry)
+ # We sometimes fall back to parsing the whole config as if it
+ # was the auth config by itself, for legacy purposes. In that
+ # case, we fail silently and return an empty conf if any of the
+ # keys is not formatted properly.
+ if raise_on_error:
+ raise errors.InvalidConfigFile(
+ 'Invalid configuration for registry {0}'.format(
+ registry
+ )
+ )
+ return {}
+ if 'identitytoken' in entry:
+ log.debug('Found an IdentityToken entry for registry %s', registry)
+ conf[registry] = {
+ 'IdentityToken': entry['identitytoken']
+ }
+ continue # Other values are irrelevant if we have a token
+
+ if 'auth' not in entry:
+ # Starting with engine v1.11 (API 1.23), an empty dictionary is
+ # a valid value in the auths config.
+ # https://github.com/docker/compose/issues/3265
+ log.debug('Auth data for %s is absent. Client might be using a credentials store instead.', registry)
+ conf[registry] = {}
+ continue
+
+ username, password = decode_auth(entry['auth'])
+ log.debug('Found entry (registry=%s, username=%s)', repr(registry), repr(username))
+
+ conf[registry] = {
+ 'username': username,
+ 'password': password,
+ 'email': entry.get('email'),
+ 'serveraddress': registry,
+ }
+ return conf
+
+ @classmethod
+ def load_config(cls, config_path, config_dict, credstore_env=None):
+ """
+        Loads authentication data from a Docker configuration file in the
+        given root directory, or from config_path if it is passed.
+ Lookup priority:
+ explicit config_path parameter > DOCKER_CONFIG environment
+ variable > ~/.docker/config.json > ~/.dockercfg
+ """
+
+ if not config_dict:
+ config_file = config.find_config_file(config_path)
+
+ if not config_file:
+ return cls({}, credstore_env)
+ try:
+ with open(config_file) as f:
+ config_dict = json.load(f)
+ except (IOError, KeyError, ValueError) as e:
+ # Likely missing new Docker config file or it's in an
+ # unknown format, continue to attempt to read old location
+ # and format.
+ log.debug(e)
+ return cls(_load_legacy_config(config_file), credstore_env)
+
+ res = {}
+ if config_dict.get('auths'):
+ log.debug("Found 'auths' section")
+ res.update({
+ 'auths': cls.parse_auth(
+ config_dict.pop('auths'), raise_on_error=True
+ )
+ })
+ if config_dict.get('credsStore'):
+ log.debug("Found 'credsStore' section")
+ res.update({'credsStore': config_dict.pop('credsStore')})
+ if config_dict.get('credHelpers'):
+ log.debug("Found 'credHelpers' section")
+ res.update({'credHelpers': config_dict.pop('credHelpers')})
+ if res:
+ return cls(res, credstore_env)
+
+ log.debug(
+ "Couldn't find auth-related section ; attempting to interpret "
+ "as auth-only file"
+ )
+ return cls({'auths': cls.parse_auth(config_dict)}, credstore_env)
+
+ @property
+ def auths(self):
+ return self.get('auths', {})
+
+ @property
+ def creds_store(self):
+ return self.get('credsStore', None)
+
+ @property
+ def cred_helpers(self):
+ return self.get('credHelpers', {})
+
+ @property
+ def is_empty(self):
+ return (
+ not self.auths and not self.creds_store and not self.cred_helpers
+ )
+
+ def resolve_authconfig(self, registry=None):
+ """
+ Returns the authentication data from the given auth configuration for a
+ specific registry. As with the Docker client, legacy entries in the
+ config with full URLs are stripped down to hostnames before checking
+ for a match. Returns None if no match was found.
+ """
+
+ if self.creds_store or self.cred_helpers:
+ store_name = self.get_credential_store(registry)
+ if store_name is not None:
+ log.debug('Using credentials store "%s"', store_name)
+ cfg = self._resolve_authconfig_credstore(registry, store_name)
+ if cfg is not None:
+ return cfg
+ log.debug('No entry in credstore - fetching from auth dict')
+
+ # Default to the public index server
+ registry = resolve_index_name(registry) if registry else INDEX_NAME
+ log.debug("Looking for auth entry for %s", repr(registry))
+
+ if registry in self.auths:
+ log.debug("Found %s", repr(registry))
+ return self.auths[registry]
+
+ for key, conf in iteritems(self.auths):
+ if resolve_index_name(key) == registry:
+ log.debug("Found %s", repr(key))
+ return conf
+
+ log.debug("No entry found")
+ return None
+
+ def _resolve_authconfig_credstore(self, registry, credstore_name):
+ if not registry or registry == INDEX_NAME:
+            # The ecosystem is inconsistent about index.docker.io vs.
+            # docker.io; in that case, it seems the full URL is necessary.
+ registry = INDEX_URL
+ log.debug("Looking for auth entry for %s", repr(registry))
+ store = self._get_store_instance(credstore_name)
+ try:
+ data = store.get(registry)
+ res = {
+ 'ServerAddress': registry,
+ }
+ if data['Username'] == TOKEN_USERNAME:
+ res['IdentityToken'] = data['Secret']
+ else:
+ res.update({
+ 'Username': data['Username'],
+ 'Password': data['Secret'],
+ })
+ return res
+ except CredentialsNotFound:
+ log.debug('No entry found')
+ return None
+ except StoreError as e:
+ raise errors.DockerException(
+ 'Credentials store error: {0}'.format(repr(e))
+ )
+
+ def _get_store_instance(self, name):
+ if name not in self._stores:
+ self._stores[name] = Store(
+ name, environment=self._credstore_env
+ )
+ return self._stores[name]
+
+ def get_credential_store(self, registry):
+ if not registry or registry == INDEX_NAME:
+ registry = INDEX_URL
+
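+        # 'credHelpers' maps individual registries to helper programs,
+        # while 'credsStore' names the default helper for all others.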
+ return self.cred_helpers.get(registry) or self.creds_store
+
+ def get_all_credentials(self):
+ auth_data = self.auths.copy()
+ if self.creds_store:
+ # Retrieve all credentials from the default store
+ store = self._get_store_instance(self.creds_store)
+ for k in store.list().keys():
+ auth_data[k] = self._resolve_authconfig_credstore(
+ k, self.creds_store
+ )
+ auth_data[convert_to_hostname(k)] = auth_data[k]
+
+ # credHelpers entries take priority over all others
+ for reg, store_name in self.cred_helpers.items():
+ auth_data[reg] = self._resolve_authconfig_credstore(
+ reg, store_name
+ )
+ auth_data[convert_to_hostname(reg)] = auth_data[reg]
+
+ return auth_data
+
+ def add_auth(self, reg, data):
+ self['auths'][reg] = data
+
+
+def resolve_authconfig(authconfig, registry=None, credstore_env=None):
+ if not isinstance(authconfig, AuthConfig):
+ authconfig = AuthConfig(authconfig, credstore_env)
+ return authconfig.resolve_authconfig(registry)
+
+
+def convert_to_hostname(url):
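+    # e.g. 'https://index.docker.io/v1/' -> 'index.docker.io'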
+ return url.replace('http://', '').replace('https://', '').split('/', 1)[0]
+
+
+def decode_auth(auth):
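+    # Decodes a base64-encoded 'username:password' pair; for example,
+    # decode_auth('dXNlcjpwYXNz') returns ('user', 'pass').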
+ if isinstance(auth, string_types):
+ auth = auth.encode('ascii')
+ s = base64.b64decode(auth)
+ login, pwd = s.split(b':', 1)
+ return login.decode('utf8'), pwd.decode('utf8')
+
+
+def encode_header(auth):
+ auth_json = json.dumps(auth).encode('ascii')
+ return base64.urlsafe_b64encode(auth_json)
+
+
+def parse_auth(entries, raise_on_error=False):
+ """
+ Parses authentication entries
+
+ Args:
+ entries: Dict of authentication entries.
+ raise_on_error: If set to true, an invalid format will raise
+ InvalidConfigFile
+
+ Returns:
+ Authentication registry.
+ """
+
+ return AuthConfig.parse_auth(entries, raise_on_error)
+
+
+def load_config(config_path=None, config_dict=None, credstore_env=None):
+ return AuthConfig.load_config(config_path, config_dict, credstore_env)
+
+
+def _load_legacy_config(config_file):
+ log.debug("Attempting to parse legacy auth file format")
+ try:
+ data = []
+ with open(config_file) as f:
+ for line in f.readlines():
+ data.append(line.strip().split(' = ')[1])
+ if len(data) < 2:
+ # Not enough data
+ raise errors.InvalidConfigFile(
+ 'Invalid or empty configuration file!'
+ )
+
+ username, password = decode_auth(data[0])
+ return {'auths': {
+ INDEX_NAME: {
+ 'username': username,
+ 'password': password,
+ 'email': data[1],
+ 'serveraddress': INDEX_URL,
+ }
+ }}
+ except Exception as e:
+ log.debug(e)
+ pass
+
+ log.debug("All parsing attempts failed - returning empty config")
+ return {}
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/constants.py b/ansible_collections/community/docker/plugins/module_utils/_api/constants.py
new file mode 100644
index 000000000..23e10b136
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/constants.py
@@ -0,0 +1,50 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import sys
+
+DEFAULT_DOCKER_API_VERSION = '1.41'
+MINIMUM_DOCKER_API_VERSION = '1.21'
+DEFAULT_TIMEOUT_SECONDS = 60
+STREAM_HEADER_SIZE_BYTES = 8
+CONTAINER_LIMITS_KEYS = [
+ 'memory', 'memswap', 'cpushares', 'cpusetcpus'
+]
+
+DEFAULT_HTTP_HOST = "127.0.0.1"
+DEFAULT_UNIX_SOCKET = "http+unix:///var/run/docker.sock"
+DEFAULT_NPIPE = 'npipe:////./pipe/docker_engine'
+
+BYTE_UNITS = {
+ 'b': 1,
+ 'k': 1024,
+ 'm': 1024 * 1024,
+ 'g': 1024 * 1024 * 1024
+}
+
+IS_WINDOWS_PLATFORM = (sys.platform == 'win32')
+WINDOWS_LONGPATH_PREFIX = '\\\\?\\'
+
+DEFAULT_USER_AGENT = "ansible-community.docker"
+DEFAULT_NUM_POOLS = 25
+
+# The OpenSSH server default value for MaxSessions is 10 which means we can
+# use up to 9, leaving the final session for the underlying SSH connection.
+# For more details see: https://github.com/docker/docker-py/issues/2246
+DEFAULT_NUM_POOLS_SSH = 9
+
+DEFAULT_MAX_POOL_SIZE = 10
+
+DEFAULT_DATA_CHUNK_SIZE = 1024 * 2048
+
+DEFAULT_SWARM_ADDR_POOL = ['10.0.0.0/8']
+DEFAULT_SWARM_SUBNET_SIZE = 24
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/credentials/constants.py b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/constants.py
new file mode 100644
index 000000000..c52bc564e
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/constants.py
@@ -0,0 +1,16 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+PROGRAM_PREFIX = 'docker-credential-'
+DEFAULT_LINUX_STORE = 'secretservice'
+DEFAULT_OSX_STORE = 'osxkeychain'
+DEFAULT_WIN32_STORE = 'wincred'
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/credentials/errors.py b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/errors.py
new file mode 100644
index 000000000..0047e8e4c
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/errors.py
@@ -0,0 +1,38 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class StoreError(RuntimeError):
+ pass
+
+
+class CredentialsNotFound(StoreError):
+ pass
+
+
+class InitializationError(StoreError):
+ pass
+
+
+def process_store_error(cpe, program):
+ message = cpe.output.decode('utf-8')
+ if 'credentials not found in native keychain' in message:
+ return CredentialsNotFound(
+ 'No matching credentials in {0}'.format(
+ program
+ )
+ )
+ return StoreError(
+ 'Credentials store {0} exited with "{1}".'.format(
+ program, cpe.output.decode('utf-8').strip()
+ )
+ )
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/credentials/store.py b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/store.py
new file mode 100644
index 000000000..e1bd28e3b
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/store.py
@@ -0,0 +1,119 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import json
+import subprocess
+
+from ansible.module_utils.six import PY3, binary_type
+
+from . import constants
+from . import errors
+from .utils import create_environment_dict
+from .utils import find_executable
+
+
+class Store(object):
+ def __init__(self, program, environment=None):
+ """ Create a store object that acts as an interface to
+ perform the basic operations for storing, retrieving
+ and erasing credentials using `program`.
+ """
+ self.program = constants.PROGRAM_PREFIX + program
+ self.exe = find_executable(self.program)
+ self.environment = environment
+ if self.exe is None:
+ raise errors.InitializationError(
+ '{0} not installed or not available in PATH'.format(
+ self.program
+ )
+ )
+
+ def get(self, server):
+ """ Retrieve credentials for `server`. If no credentials are found,
+ a `StoreError` will be raised.
+ """
+ if not isinstance(server, binary_type):
+ server = server.encode('utf-8')
+ data = self._execute('get', server)
+ result = json.loads(data.decode('utf-8'))
+
+        # docker-credential-pass will return an object for nonexistent servers
+ # whereas other helpers will exit with returncode != 0. For
+ # consistency, if no significant data is returned,
+ # raise CredentialsNotFound
+ if result['Username'] == '' and result['Secret'] == '':
+ raise errors.CredentialsNotFound(
+ 'No matching credentials in {0}'.format(self.program)
+ )
+
+ return result
+
+ def store(self, server, username, secret):
+ """ Store credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ data_input = json.dumps({
+ 'ServerURL': server,
+ 'Username': username,
+ 'Secret': secret
+ }).encode('utf-8')
+ return self._execute('store', data_input)
+
+ def erase(self, server):
+ """ Erase credentials for `server`. Raises a `StoreError` if an error
+ occurs.
+ """
+ if not isinstance(server, binary_type):
+ server = server.encode('utf-8')
+ self._execute('erase', server)
+
+ def list(self):
+ """ List stored credentials. Requires v0.4.0+ of the helper.
+ """
+ data = self._execute('list', None)
+ return json.loads(data.decode('utf-8'))
+
+ def _execute(self, subcmd, data_input):
+ output = None
+ env = create_environment_dict(self.environment)
+ try:
+ if PY3:
+ output = subprocess.check_output(
+ [self.exe, subcmd], input=data_input, env=env,
+ )
+ else:
+ process = subprocess.Popen(
+ [self.exe, subcmd], stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, env=env,
+ )
+ output, dummy = process.communicate(data_input)
+ if process.returncode != 0:
+ raise subprocess.CalledProcessError(
+ returncode=process.returncode, cmd='', output=output
+ )
+ except subprocess.CalledProcessError as e:
+ raise errors.process_store_error(e, self.program)
+ except OSError as e:
+ if e.errno == errno.ENOENT:
+ raise errors.StoreError(
+ '{0} not installed or not available in PATH'.format(
+ self.program
+ )
+ )
+ else:
+ raise errors.StoreError(
+ 'Unexpected OS error "{0}", errno={1}'.format(
+ e.strerror, e.errno
+ )
+ )
+ return output
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/credentials/utils.py b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/utils.py
new file mode 100644
index 000000000..1ab84fe5b
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/credentials/utils.py
@@ -0,0 +1,62 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import sys
+
+from ansible.module_utils.six import PY2
+
+if PY2:
+ from distutils.spawn import find_executable as which
+else:
+ from shutil import which
+
+
+def find_executable(executable, path=None):
+ """
+    Like distutils.spawn.find_executable, but on Windows it looks up
+    every extension declared in PATHEXT instead of just `.exe`.
+ """
+ if not PY2:
+ # shutil.which() already uses PATHEXT on Windows, so on
+ # Python 3 we can simply use shutil.which() in all cases.
+ # (https://github.com/docker/docker-py/commit/42789818bed5d86b487a030e2e60b02bf0cfa284)
+ return which(executable, path=path)
+
+ if sys.platform != 'win32':
+ return which(executable, path)
+
+ if path is None:
+ path = os.environ['PATH']
+
+ paths = path.split(os.pathsep)
+ extensions = os.environ.get('PATHEXT', '.exe').split(os.pathsep)
+ base, ext = os.path.splitext(executable)
+
+ if not os.path.isfile(executable):
+ for p in paths:
+ for ext in extensions:
+ f = os.path.join(p, base + ext)
+ if os.path.isfile(f):
+ return f
+ return None
+ else:
+ return executable
+
+
+def create_environment_dict(overrides):
+ """
+ Create and return a copy of os.environ with the specified overrides
+ """
+ result = os.environ.copy()
+ result.update(overrides or {})
+ return result
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/errors.py b/ansible_collections/community/docker/plugins/module_utils/_api/errors.py
new file mode 100644
index 000000000..90dd5aada
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/errors.py
@@ -0,0 +1,223 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ._import_helper import HTTPError as _HTTPError
+
+from ansible.module_utils.six import raise_from
+
+
+class DockerException(Exception):
+ """
+ A base class from which all other exceptions inherit.
+
+ If you want to catch all errors that the Docker SDK might raise,
+ catch this base exception.
+ """
+
+
+def create_api_error_from_http_exception(e):
+ """
+ Create a suitable APIError from requests.exceptions.HTTPError.
+ """
+ response = e.response
+ try:
+ explanation = response.json()['message']
+ except ValueError:
+ explanation = (response.content or '').strip()
+ cls = APIError
+ if response.status_code == 404:
+ if explanation and ('No such image' in str(explanation) or
+ 'not found: does not exist or no pull access'
+ in str(explanation) or
+ 'repository does not exist' in str(explanation)):
+ cls = ImageNotFound
+ else:
+ cls = NotFound
+ raise_from(cls(e, response=response, explanation=explanation), e)
+
+
+class APIError(_HTTPError, DockerException):
+ """
+ An HTTP error from the API.
+ """
+ def __init__(self, message, response=None, explanation=None):
+ # requests 1.2 supports response as a keyword argument, but
+ # requests 1.1 doesn't
+ super(APIError, self).__init__(message)
+ self.response = response
+ self.explanation = explanation
+
+ def __str__(self):
+ message = super(APIError, self).__str__()
+
+ if self.is_client_error():
+ message = '{0} Client Error for {1}: {2}'.format(
+ self.response.status_code, self.response.url,
+ self.response.reason)
+
+ elif self.is_server_error():
+ message = '{0} Server Error for {1}: {2}'.format(
+ self.response.status_code, self.response.url,
+ self.response.reason)
+
+ if self.explanation:
+ message = '{0} ("{1}")'.format(message, self.explanation)
+
+ return message
+
+ @property
+ def status_code(self):
+ if self.response is not None:
+ return self.response.status_code
+
+ def is_error(self):
+ return self.is_client_error() or self.is_server_error()
+
+ def is_client_error(self):
+ if self.status_code is None:
+ return False
+ return 400 <= self.status_code < 500
+
+ def is_server_error(self):
+ if self.status_code is None:
+ return False
+ return 500 <= self.status_code < 600
+
+
+class NotFound(APIError):
+ pass
+
+
+class ImageNotFound(NotFound):
+ pass
+
+
+class InvalidVersion(DockerException):
+ pass
+
+
+class InvalidRepository(DockerException):
+ pass
+
+
+class InvalidConfigFile(DockerException):
+ pass
+
+
+class InvalidArgument(DockerException):
+ pass
+
+
+class DeprecatedMethod(DockerException):
+ pass
+
+
+class TLSParameterError(DockerException):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return self.msg + (". TLS configurations should map the Docker CLI "
+ "client configurations. See "
+ "https://docs.docker.com/engine/articles/https/ "
+ "for API details.")
+
+
+class NullResource(DockerException, ValueError):
+ pass
+
+
+class ContainerError(DockerException):
+ """
+ Represents a container that has exited with a non-zero exit code.
+ """
+ def __init__(self, container, exit_status, command, image, stderr):
+ self.container = container
+ self.exit_status = exit_status
+ self.command = command
+ self.image = image
+ self.stderr = stderr
+
+ err = ": {0}".format(stderr) if stderr is not None else ""
+ msg = ("Command '{0}' in image '{1}' returned non-zero exit "
+ "status {2}{3}").format(command, image, exit_status, err)
+
+ super(ContainerError, self).__init__(msg)
+
+
+class StreamParseError(RuntimeError):
+ def __init__(self, reason):
+ self.msg = reason
+
+
+class BuildError(DockerException):
+ def __init__(self, reason, build_log):
+ super(BuildError, self).__init__(reason)
+ self.msg = reason
+ self.build_log = build_log
+
+
+class ImageLoadError(DockerException):
+ pass
+
+
+def create_unexpected_kwargs_error(name, kwargs):
+ quoted_kwargs = ["'{0}'".format(k) for k in sorted(kwargs)]
+ text = ["{0}() ".format(name)]
+ if len(quoted_kwargs) == 1:
+ text.append("got an unexpected keyword argument ")
+ else:
+ text.append("got unexpected keyword arguments ")
+ text.append(', '.join(quoted_kwargs))
+ return TypeError(''.join(text))
+
+
+class MissingContextParameter(DockerException):
+ def __init__(self, param):
+ self.param = param
+
+ def __str__(self):
+ return ("missing parameter: {0}".format(self.param))
+
+
+class ContextAlreadyExists(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return ("context {0} already exists".format(self.name))
+
+
+class ContextException(DockerException):
+ def __init__(self, msg):
+ self.msg = msg
+
+ def __str__(self):
+ return (self.msg)
+
+
+class ContextNotFound(DockerException):
+ def __init__(self, name):
+ self.name = name
+
+ def __str__(self):
+ return ("context '{0}' not found".format(self.name))
+
+
+class MissingRequirementException(DockerException):
+ def __init__(self, msg, requirement, import_exception):
+ self.msg = msg
+ self.requirement = requirement
+ self.import_exception = import_exception
+
+ def __str__(self):
+ return (self.msg)
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/tls.py b/ansible_collections/community/docker/plugins/module_utils/_api/tls.py
new file mode 100644
index 000000000..ed5416d82
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/tls.py
@@ -0,0 +1,122 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import os
+import ssl
+import sys
+
+from . import errors
+from .transport.ssladapter import SSLHTTPAdapter
+
+
+class TLSConfig(object):
+ """
+ TLS configuration.
+
+ Args:
+ client_cert (tuple of str): Path to client cert, path to client key.
+ ca_cert (str): Path to CA cert file.
+ verify (bool or str): This can be ``False`` or a path to a CA cert
+ file.
+ ssl_version (int): A valid `SSL version`_.
+ assert_hostname (bool): Verify the hostname of the server.
+
+ .. _`SSL version`:
+ https://docs.python.org/3.5/library/ssl.html#ssl.PROTOCOL_TLSv1
+ """
+ cert = None
+ ca_cert = None
+ verify = None
+ ssl_version = None
+
+ def __init__(self, client_cert=None, ca_cert=None, verify=None,
+ ssl_version=None, assert_hostname=None,
+ assert_fingerprint=None):
+ # Argument compatibility/mapping with
+ # https://docs.docker.com/engine/articles/https/
+ # This diverges from the Docker CLI in that users can specify 'tls'
+ # here, but also disable any public/default CA pool verification by
+ # leaving verify=False
+
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+
+ # If the user provides an SSL version, we should use their preference
+ if ssl_version:
+ self.ssl_version = ssl_version
+ elif (sys.version_info.major, sys.version_info.minor) < (3, 6):
+ # If the user provides no ssl version, we should default to
+ # TLSv1_2. This option is the most secure, and will work for the
+ # majority of users with reasonably up-to-date software. However,
+ # before doing so, detect openssl version to ensure we can support
+ # it.
+ if ssl.OPENSSL_VERSION_INFO[:3] >= (1, 0, 1) and hasattr(
+ ssl, 'PROTOCOL_TLSv1_2'):
+ # If the OpenSSL version is high enough to support TLSv1_2,
+ # then we should use it.
+ self.ssl_version = getattr(ssl, 'PROTOCOL_TLSv1_2')
+ else:
+ # Otherwise, TLS v1.0 seems to be the safest default;
+ # SSLv23 fails in mysterious ways:
+ # https://github.com/docker/docker-py/issues/963
+ self.ssl_version = ssl.PROTOCOL_TLSv1
+ else:
+ self.ssl_version = ssl.PROTOCOL_TLS_CLIENT
+
+ # "client_cert" must have both or neither cert/key files. In
+ # either case, Alert the user when both are expected, but any are
+ # missing.
+
+ if client_cert:
+ try:
+ tls_cert, tls_key = client_cert
+ except ValueError:
+ raise errors.TLSParameterError(
+ 'client_cert must be a tuple of'
+ ' (client certificate, key file)'
+ )
+
+ if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
+ not os.path.isfile(tls_key)):
+ raise errors.TLSParameterError(
+ 'Path to a certificate and key files must be provided'
+ ' through the client_cert param'
+ )
+ self.cert = (tls_cert, tls_key)
+
+ # If verify is set, make sure the cert exists
+ self.verify = verify
+ self.ca_cert = ca_cert
+ if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
+ raise errors.TLSParameterError(
+ 'Invalid CA certificate provided for `ca_cert`.'
+ )
+
+ def configure_client(self, client):
+ """
+ Configure a client with these TLS options.
+ """
+ client.ssl_version = self.ssl_version
+
+ if self.verify and self.ca_cert:
+ client.verify = self.ca_cert
+ else:
+ client.verify = self.verify
+
+ if self.cert:
+ client.cert = self.cert
+
+ client.mount('https://', SSLHTTPAdapter(
+ ssl_version=self.ssl_version,
+ assert_hostname=self.assert_hostname,
+ assert_fingerprint=self.assert_fingerprint,
+ ))
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/basehttpadapter.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/basehttpadapter.py
new file mode 100644
index 000000000..2afa60aea
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/basehttpadapter.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .._import_helper import HTTPAdapter as _HTTPAdapter
+
+
+class BaseHTTPAdapter(_HTTPAdapter):
+ def close(self):
+ super(BaseHTTPAdapter, self).close()
+ if hasattr(self, 'pools'):
+ self.pools.clear()
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipeconn.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipeconn.py
new file mode 100644
index 000000000..912e465fe
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipeconn.py
@@ -0,0 +1,113 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from ansible.module_utils.six.moves.queue import Empty
+
+from .. import constants
+from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
+
+from .basehttpadapter import BaseHTTPAdapter
+from .npipesocket import NpipeSocket
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class NpipeHTTPConnection(urllib3_connection.HTTPConnection, object):
+ def __init__(self, npipe_path, timeout=60):
+ super(NpipeHTTPConnection, self).__init__(
+ 'localhost', timeout=timeout
+ )
+ self.npipe_path = npipe_path
+ self.timeout = timeout
+
+ def connect(self):
+ sock = NpipeSocket()
+ sock.settimeout(self.timeout)
+ sock.connect(self.npipe_path)
+ self.sock = sock
+
+
+class NpipeHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ def __init__(self, npipe_path, timeout=60, maxsize=10):
+ super(NpipeHTTPConnectionPool, self).__init__(
+ 'localhost', timeout=timeout, maxsize=maxsize
+ )
+ self.npipe_path = npipe_path
+ self.timeout = timeout
+
+ def _new_conn(self):
+ return NpipeHTTPConnection(
+ self.npipe_path, self.timeout
+ )
+
+ # When re-using connections, urllib3 tries to call select() on our
+ # NpipeSocket instance, causing a crash. To circumvent this, we override
+ # _get_conn, where that check happens.
+ def _get_conn(self, timeout):
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
+
+ except Empty:
+ if self.block:
+ raise urllib3.exceptions.EmptyPoolError(
+ self,
+ "Pool reached maximum size and no more "
+ "connections are allowed."
+ )
+ pass # Oh well, we'll create a new connection then
+
+ return conn or self._new_conn()
+
+
+class NpipeHTTPAdapter(BaseHTTPAdapter):
+
+ __attrs__ = HTTPAdapter.__attrs__ + ['npipe_path',
+ 'pools',
+ 'timeout',
+ 'max_pool_size']
+
+ def __init__(self, base_url, timeout=60,
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
+ self.npipe_path = base_url.replace('npipe://', '')
+ self.timeout = timeout
+ self.max_pool_size = max_pool_size
+ self.pools = RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+ super(NpipeHTTPAdapter, self).__init__()
+
+ def get_connection(self, url, proxies=None):
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+ pool = NpipeHTTPConnectionPool(
+ self.npipe_path, self.timeout,
+ maxsize=self.max_pool_size
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def request_url(self, request, proxies):
+ # The select_proxy utility in requests errors out when the provided URL
+        # doesn't have a hostname, as is the case when using a named pipe.
+        # Since proxies are an irrelevant notion in the case of named pipes
+        # anyway, we simply return the path URL directly.
+        # See also: https://github.com/docker/docker-py/issues/811
+ return request.path_url
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipesocket.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipesocket.py
new file mode 100644
index 000000000..5e5a90da0
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/npipesocket.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import functools
+import io
+import time
+import traceback
+
+from ansible.module_utils.six import PY2
+
+PYWIN32_IMPORT_ERROR = None
+try:
+ import win32file
+ import win32pipe
+ import pywintypes
+ import win32event
+ import win32api
+except ImportError:
+ PYWIN32_IMPORT_ERROR = traceback.format_exc()
+
+
+cERROR_PIPE_BUSY = 0xe7
+cSECURITY_SQOS_PRESENT = 0x100000
+cSECURITY_ANONYMOUS = 0
+
+MAXIMUM_RETRY_COUNT = 10
+
+
+def check_closed(f):
+ @functools.wraps(f)
+ def wrapped(self, *args, **kwargs):
+ if self._closed:
+ raise RuntimeError(
+ 'Can not reuse socket after connection was closed.'
+ )
+ return f(self, *args, **kwargs)
+ return wrapped
+
+
+class NpipeSocket(object):
+ """ Partial implementation of the socket API over windows named pipes.
+ This implementation is only designed to be used as a client socket,
+ and server-specific methods (bind, listen, accept...) are not
+ implemented.
+ """
+
+ def __init__(self, handle=None):
+ self._timeout = win32pipe.NMPWAIT_USE_DEFAULT_WAIT
+ self._handle = handle
+ self._closed = False
+
+ def accept(self):
+ raise NotImplementedError()
+
+ def bind(self, address):
+ raise NotImplementedError()
+
+ def close(self):
+ self._handle.Close()
+ self._closed = True
+
+ @check_closed
+ def connect(self, address, retry_count=0):
+ try:
+ handle = win32file.CreateFile(
+ address,
+ win32file.GENERIC_READ | win32file.GENERIC_WRITE,
+ 0,
+ None,
+ win32file.OPEN_EXISTING,
+ (cSECURITY_ANONYMOUS
+ | cSECURITY_SQOS_PRESENT
+ | win32file.FILE_FLAG_OVERLAPPED),
+ 0
+ )
+ except win32pipe.error as e:
+ # See Remarks:
+ # https://msdn.microsoft.com/en-us/library/aa365800.aspx
+ if e.winerror == cERROR_PIPE_BUSY:
+ # Another program or thread has grabbed our pipe instance
+ # before we got to it. Wait for availability and attempt to
+ # connect again.
+ retry_count = retry_count + 1
+ if (retry_count < MAXIMUM_RETRY_COUNT):
+ time.sleep(1)
+ return self.connect(address, retry_count)
+ raise e
+
+ self.flags = win32pipe.GetNamedPipeInfo(handle)[0]
+
+ self._handle = handle
+ self._address = address
+
+ @check_closed
+ def connect_ex(self, address):
+ return self.connect(address)
+
+ @check_closed
+ def detach(self):
+ self._closed = True
+ return self._handle
+
+ @check_closed
+ def dup(self):
+ return NpipeSocket(self._handle)
+
+ def getpeername(self):
+ return self._address
+
+ def getsockname(self):
+ return self._address
+
+ def getsockopt(self, level, optname, buflen=None):
+ raise NotImplementedError()
+
+ def ioctl(self, control, option):
+ raise NotImplementedError()
+
+ def listen(self, backlog):
+ raise NotImplementedError()
+
+ def makefile(self, mode=None, bufsize=None):
+ if mode.strip('b') != 'r':
+ raise NotImplementedError()
+ rawio = NpipeFileIOBase(self)
+ if bufsize is None or bufsize <= 0:
+ bufsize = io.DEFAULT_BUFFER_SIZE
+ return io.BufferedReader(rawio, buffer_size=bufsize)
+
+ @check_closed
+ def recv(self, bufsize, flags=0):
+ err, data = win32file.ReadFile(self._handle, bufsize)
+ return data
+
+ @check_closed
+ def recvfrom(self, bufsize, flags=0):
+ data = self.recv(bufsize, flags)
+ return (data, self._address)
+
+ @check_closed
+ def recvfrom_into(self, buf, nbytes=0, flags=0):
+ return self.recv_into(buf, nbytes, flags), self._address
+
+ @check_closed
+ def recv_into(self, buf, nbytes=0):
+ if PY2:
+ return self._recv_into_py2(buf, nbytes)
+
+ readbuf = buf
+ if not isinstance(buf, memoryview):
+ readbuf = memoryview(buf)
+
+ event = win32event.CreateEvent(None, True, True, None)
+ try:
+ overlapped = pywintypes.OVERLAPPED()
+ overlapped.hEvent = event
+ err, data = win32file.ReadFile(
+ self._handle,
+ readbuf[:nbytes] if nbytes else readbuf,
+ overlapped
+ )
+ wait_result = win32event.WaitForSingleObject(event, self._timeout)
+ if wait_result == win32event.WAIT_TIMEOUT:
+ win32file.CancelIo(self._handle)
+ raise TimeoutError
+ return win32file.GetOverlappedResult(self._handle, overlapped, 0)
+ finally:
+ win32api.CloseHandle(event)
+
+ def _recv_into_py2(self, buf, nbytes):
+ err, data = win32file.ReadFile(self._handle, nbytes or len(buf))
+ n = len(data)
+ buf[:n] = data
+ return n
+
+ @check_closed
+ def send(self, string, flags=0):
+ event = win32event.CreateEvent(None, True, True, None)
+ try:
+ overlapped = pywintypes.OVERLAPPED()
+ overlapped.hEvent = event
+ win32file.WriteFile(self._handle, string, overlapped)
+ wait_result = win32event.WaitForSingleObject(event, self._timeout)
+ if wait_result == win32event.WAIT_TIMEOUT:
+ win32file.CancelIo(self._handle)
+ raise TimeoutError
+ return win32file.GetOverlappedResult(self._handle, overlapped, 0)
+ finally:
+ win32api.CloseHandle(event)
+
+ @check_closed
+ def sendall(self, string, flags=0):
+ return self.send(string, flags)
+
+ @check_closed
+ def sendto(self, string, address):
+ self.connect(address)
+ return self.send(string)
+
+ def setblocking(self, flag):
+ if flag:
+ return self.settimeout(None)
+ return self.settimeout(0)
+
+ def settimeout(self, value):
+ if value is None:
+ # Blocking mode
+ self._timeout = win32event.INFINITE
+ elif not isinstance(value, (float, int)) or value < 0:
+ raise ValueError('Timeout value out of range')
+ else:
+ # Timeout mode - Value converted to milliseconds
+ self._timeout = int(value * 1000)
+
+ def gettimeout(self):
+ return self._timeout
+
+ def setsockopt(self, level, optname, value):
+ raise NotImplementedError()
+
+ @check_closed
+ def shutdown(self, how):
+ return self.close()
+
+
+class NpipeFileIOBase(io.RawIOBase):
+ def __init__(self, npipe_socket):
+ self.sock = npipe_socket
+
+ def close(self):
+ super(NpipeFileIOBase, self).close()
+ self.sock = None
+
+ def fileno(self):
+ return self.sock.fileno()
+
+ def isatty(self):
+ return False
+
+ def readable(self):
+ return True
+
+ def readinto(self, buf):
+ return self.sock.recv_into(buf)
+
+ def seekable(self):
+ return False
+
+ def writable(self):
+ return False
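
NpipeSocket mimics just enough of the standard socket API for urllib3 to treat a Windows named pipe as an ordinary transport. A minimal usage sketch, assuming a Windows host with pywin32 installed and Docker's default engine pipe name (neither is guaranteed by this file):

    from ansible_collections.community.docker.plugins.module_utils._api.transport.npipesocket import NpipeSocket

    s = NpipeSocket()
    s.settimeout(5)                        # seconds; stored internally as milliseconds
    s.connect(r'\\.\pipe\docker_engine')   # retries on ERROR_PIPE_BUSY, up to MAXIMUM_RETRY_COUNT
    reader = s.makefile('rb')              # BufferedReader over NpipeFileIOBase
    header = reader.read(8)
    s.close()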
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/sshconn.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/sshconn.py
new file mode 100644
index 000000000..a621d7557
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/sshconn.py
@@ -0,0 +1,270 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import logging
+import os
+import signal
+import socket
+import subprocess
+import traceback
+
+from ansible.module_utils.six import PY3
+from ansible.module_utils.six.moves.queue import Empty
+from ansible.module_utils.six.moves.urllib_parse import urlparse
+
+from .basehttpadapter import BaseHTTPAdapter
+from .. import constants
+
+from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
+
+PARAMIKO_IMPORT_ERROR = None
+try:
+ import paramiko
+except ImportError:
+ PARAMIKO_IMPORT_ERROR = traceback.format_exc()
+
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class SSHSocket(socket.socket):
+ def __init__(self, host):
+ super(SSHSocket, self).__init__(
+ socket.AF_INET, socket.SOCK_STREAM)
+ self.host = host
+ self.port = None
+ self.user = None
+ if ':' in self.host:
+ self.host, self.port = self.host.split(':')
+ if '@' in self.host:
+ self.user, self.host = self.host.split('@')
+
+ self.proc = None
+
+ def connect(self, **kwargs):
+ args = ['ssh']
+ if self.user:
+ args = args + ['-l', self.user]
+
+ if self.port:
+ args = args + ['-p', self.port]
+
+ args = args + ['--', self.host, 'docker system dial-stdio']
+
+ preexec_func = None
+ if not constants.IS_WINDOWS_PLATFORM:
+ def f():
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ preexec_func = f
+
+ env = dict(os.environ)
+
+ # drop LD_LIBRARY_PATH and SSL_CERT_FILE
+ env.pop('LD_LIBRARY_PATH', None)
+ env.pop('SSL_CERT_FILE', None)
+
+ self.proc = subprocess.Popen(
+ args,
+ env=env,
+ stdout=subprocess.PIPE,
+ stdin=subprocess.PIPE,
+ preexec_fn=preexec_func)
+
+ def _write(self, data):
+ if not self.proc or self.proc.stdin.closed:
+            raise Exception('SSH subprocess not initiated. '
+                            'connect() must be called first.')
+ written = self.proc.stdin.write(data)
+ self.proc.stdin.flush()
+ return written
+
+ def sendall(self, data):
+ self._write(data)
+
+ def send(self, data):
+ return self._write(data)
+
+ def recv(self, n):
+ if not self.proc:
+            raise Exception('SSH subprocess not initiated. '
+                            'connect() must be called first.')
+ return self.proc.stdout.read(n)
+
+ def makefile(self, mode):
+ if not self.proc:
+ self.connect()
+ if PY3:
+ self.proc.stdout.channel = self
+
+ return self.proc.stdout
+
+ def close(self):
+ if not self.proc or self.proc.stdin.closed:
+ return
+ self.proc.stdin.write(b'\n\n')
+ self.proc.stdin.flush()
+ self.proc.terminate()
+
+
+class SSHConnection(urllib3_connection.HTTPConnection, object):
+ def __init__(self, ssh_transport=None, timeout=60, host=None):
+ super(SSHConnection, self).__init__(
+ 'localhost', timeout=timeout
+ )
+ self.ssh_transport = ssh_transport
+ self.timeout = timeout
+ self.ssh_host = host
+
+ def connect(self):
+ if self.ssh_transport:
+ sock = self.ssh_transport.open_session()
+ sock.settimeout(self.timeout)
+ sock.exec_command('docker system dial-stdio')
+ else:
+ sock = SSHSocket(self.ssh_host)
+ sock.settimeout(self.timeout)
+ sock.connect()
+
+ self.sock = sock
+
+
+class SSHConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ scheme = 'ssh'
+
+ def __init__(self, ssh_client=None, timeout=60, maxsize=10, host=None):
+ super(SSHConnectionPool, self).__init__(
+ 'localhost', timeout=timeout, maxsize=maxsize
+ )
+ self.ssh_transport = None
+ self.timeout = timeout
+ if ssh_client:
+ self.ssh_transport = ssh_client.get_transport()
+ self.ssh_host = host
+
+ def _new_conn(self):
+ return SSHConnection(self.ssh_transport, self.timeout, self.ssh_host)
+
+ # When re-using connections, urllib3 calls fileno() on our
+ # SSH channel instance, quickly overloading our fd limit. To avoid this,
+ # we override _get_conn
+ def _get_conn(self, timeout):
+ conn = None
+ try:
+ conn = self.pool.get(block=self.block, timeout=timeout)
+
+ except AttributeError: # self.pool is None
+ raise urllib3.exceptions.ClosedPoolError(self, "Pool is closed.")
+
+ except Empty:
+ if self.block:
+ raise urllib3.exceptions.EmptyPoolError(
+ self,
+ "Pool reached maximum size and no more "
+ "connections are allowed."
+ )
+ pass # Oh well, we'll create a new connection then
+
+ return conn or self._new_conn()
+
+
+class SSHHTTPAdapter(BaseHTTPAdapter):
+
+ __attrs__ = HTTPAdapter.__attrs__ + [
+ 'pools', 'timeout', 'ssh_client', 'ssh_params', 'max_pool_size'
+ ]
+
+ def __init__(self, base_url, timeout=60,
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE,
+ shell_out=False):
+ self.ssh_client = None
+ if not shell_out:
+ self._create_paramiko_client(base_url)
+ self._connect()
+
+ self.ssh_host = base_url
+ if base_url.startswith('ssh://'):
+ self.ssh_host = base_url[len('ssh://'):]
+
+ self.timeout = timeout
+ self.max_pool_size = max_pool_size
+ self.pools = RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+ super(SSHHTTPAdapter, self).__init__()
+
+ def _create_paramiko_client(self, base_url):
+ logging.getLogger("paramiko").setLevel(logging.WARNING)
+ self.ssh_client = paramiko.SSHClient()
+ base_url = urlparse(base_url)
+ self.ssh_params = {
+ "hostname": base_url.hostname,
+ "port": base_url.port,
+ "username": base_url.username,
+ }
+ ssh_config_file = os.path.expanduser("~/.ssh/config")
+ if os.path.exists(ssh_config_file):
+ conf = paramiko.SSHConfig()
+ with open(ssh_config_file) as f:
+ conf.parse(f)
+ host_config = conf.lookup(base_url.hostname)
+ if 'proxycommand' in host_config:
+ self.ssh_params["sock"] = paramiko.ProxyCommand(
+ host_config['proxycommand']
+ )
+ if 'hostname' in host_config:
+ self.ssh_params['hostname'] = host_config['hostname']
+ if base_url.port is None and 'port' in host_config:
+ self.ssh_params['port'] = host_config['port']
+ if base_url.username is None and 'user' in host_config:
+ self.ssh_params['username'] = host_config['user']
+ if 'identityfile' in host_config:
+ self.ssh_params['key_filename'] = host_config['identityfile']
+
+ self.ssh_client.load_system_host_keys()
+ self.ssh_client.set_missing_host_key_policy(paramiko.RejectPolicy())
+
+ def _connect(self):
+ if self.ssh_client:
+ self.ssh_client.connect(**self.ssh_params)
+
+ def get_connection(self, url, proxies=None):
+ if not self.ssh_client:
+ return SSHConnectionPool(
+ ssh_client=self.ssh_client,
+ timeout=self.timeout,
+ maxsize=self.max_pool_size,
+ host=self.ssh_host
+ )
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+        # Connection is closed; try to reconnect.
+ if self.ssh_client and not self.ssh_client.get_transport():
+ self._connect()
+
+ pool = SSHConnectionPool(
+ ssh_client=self.ssh_client,
+ timeout=self.timeout,
+ maxsize=self.max_pool_size,
+ host=self.ssh_host
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def close(self):
+ super(SSHHTTPAdapter, self).close()
+ if self.ssh_client:
+ self.ssh_client.close()
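
SSHHTTPAdapter supports two transports: a paramiko SSH session, or, with shell_out=True, a plain ssh subprocess that runs docker system dial-stdio on the remote side. A minimal sketch of mounting it on a requests session; the scheme string and hostname are illustrative, not mandated by this file, and the constructor connects immediately unless shell_out=True:

    import requests
    from ansible_collections.community.docker.plugins.module_utils._api.transport.sshconn import SSHHTTPAdapter

    session = requests.Session()
    adapter = SSHHTTPAdapter('ssh://user@docker-host', timeout=60, shell_out=True)
    session.mount('http+docker://ssh', adapter)
    # Each pooled connection tunnels the Docker HTTP API over the stdio of the
    # remote `docker system dial-stdio` process.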
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py
new file mode 100644
index 000000000..e1b5ce020
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py
@@ -0,0 +1,73 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+""" Resolves OpenSSL issues in some servers:
+ https://lukasa.co.uk/2013/01/Choosing_SSL_Version_In_Requests/
+ https://github.com/kennethreitz/requests/pull/799
+"""
+
+from ansible_collections.community.docker.plugins.module_utils.version import StrictVersion
+
+from .._import_helper import HTTPAdapter, urllib3
+from .basehttpadapter import BaseHTTPAdapter
+
+
+PoolManager = urllib3.poolmanager.PoolManager
+
+
+class SSLHTTPAdapter(BaseHTTPAdapter):
+ '''An HTTPS Transport Adapter that uses an arbitrary SSL version.'''
+
+ __attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint',
+ 'assert_hostname',
+ 'ssl_version']
+
+ def __init__(self, ssl_version=None, assert_hostname=None,
+ assert_fingerprint=None, **kwargs):
+ self.ssl_version = ssl_version
+ self.assert_hostname = assert_hostname
+ self.assert_fingerprint = assert_fingerprint
+ super(SSLHTTPAdapter, self).__init__(**kwargs)
+
+ def init_poolmanager(self, connections, maxsize, block=False):
+ kwargs = {
+ 'num_pools': connections,
+ 'maxsize': maxsize,
+ 'block': block,
+ 'assert_hostname': self.assert_hostname,
+ 'assert_fingerprint': self.assert_fingerprint,
+ }
+ if self.ssl_version and self.can_override_ssl_version():
+ kwargs['ssl_version'] = self.ssl_version
+
+ self.poolmanager = PoolManager(**kwargs)
+
+ def get_connection(self, *args, **kwargs):
+ """
+ Ensure assert_hostname is set correctly on our pool
+
+ We already take care of a normal poolmanager via init_poolmanager
+
+ But we still need to take care of when there is a proxy poolmanager
+ """
+ conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs)
+ if conn.assert_hostname != self.assert_hostname:
+ conn.assert_hostname = self.assert_hostname
+ return conn
+
+ def can_override_ssl_version(self):
+ urllib_ver = urllib3.__version__.split('-')[0]
+ if urllib_ver is None:
+ return False
+ if urllib_ver == 'dev':
+ return True
+ return StrictVersion(urllib_ver) > StrictVersion('1.5')
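
can_override_ssl_version() gates the ssl_version pin on the installed urllib3, since old releases did not accept that keyword. A minimal sketch of mounting the adapter, assuming requests is available:

    import ssl
    import requests
    from ansible_collections.community.docker.plugins.module_utils._api.transport.ssladapter import SSLHTTPAdapter

    session = requests.Session()
    session.mount('https://', SSLHTTPAdapter(
        ssl_version=ssl.PROTOCOL_TLSv1_2,   # only applied when urllib3 supports it
        assert_hostname=False,              # propagated to proxied pools by get_connection()
    ))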
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/unixconn.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/unixconn.py
new file mode 100644
index 000000000..3b24fe46a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/unixconn.py
@@ -0,0 +1,114 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import socket
+
+from ansible.module_utils.six import PY2
+
+from .basehttpadapter import BaseHTTPAdapter
+from .. import constants
+
+from .._import_helper import HTTPAdapter, urllib3, urllib3_connection
+
+
+RecentlyUsedContainer = urllib3._collections.RecentlyUsedContainer
+
+
+class UnixHTTPConnection(urllib3_connection.HTTPConnection, object):
+
+ def __init__(self, base_url, unix_socket, timeout=60):
+ super(UnixHTTPConnection, self).__init__(
+ 'localhost', timeout=timeout
+ )
+ self.base_url = base_url
+ self.unix_socket = unix_socket
+ self.timeout = timeout
+ self.disable_buffering = False
+
+ def connect(self):
+ sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
+ sock.settimeout(self.timeout)
+ sock.connect(self.unix_socket)
+ self.sock = sock
+
+ def putheader(self, header, *values):
+ super(UnixHTTPConnection, self).putheader(header, *values)
+ if header == 'Connection' and 'Upgrade' in values:
+ self.disable_buffering = True
+
+ def response_class(self, sock, *args, **kwargs):
+ if PY2:
+ # FIXME: We may need to disable buffering on Py3 as well,
+ # but there's no clear way to do it at the moment. See:
+ # https://github.com/docker/docker-py/issues/1799
+ kwargs['buffering'] = not self.disable_buffering
+
+ return super(UnixHTTPConnection, self).response_class(sock, *args, **kwargs)
+
+
+class UnixHTTPConnectionPool(urllib3.connectionpool.HTTPConnectionPool):
+ def __init__(self, base_url, socket_path, timeout=60, maxsize=10):
+ super(UnixHTTPConnectionPool, self).__init__(
+ 'localhost', timeout=timeout, maxsize=maxsize
+ )
+ self.base_url = base_url
+ self.socket_path = socket_path
+ self.timeout = timeout
+
+ def _new_conn(self):
+ return UnixHTTPConnection(
+ self.base_url, self.socket_path, self.timeout
+ )
+
+
+class UnixHTTPAdapter(BaseHTTPAdapter):
+
+ __attrs__ = HTTPAdapter.__attrs__ + ['pools',
+ 'socket_path',
+ 'timeout',
+ 'max_pool_size']
+
+ def __init__(self, socket_url, timeout=60,
+ pool_connections=constants.DEFAULT_NUM_POOLS,
+ max_pool_size=constants.DEFAULT_MAX_POOL_SIZE):
+ socket_path = socket_url.replace('http+unix://', '')
+ if not socket_path.startswith('/'):
+ socket_path = '/' + socket_path
+ self.socket_path = socket_path
+ self.timeout = timeout
+ self.max_pool_size = max_pool_size
+ self.pools = RecentlyUsedContainer(
+ pool_connections, dispose_func=lambda p: p.close()
+ )
+ super(UnixHTTPAdapter, self).__init__()
+
+ def get_connection(self, url, proxies=None):
+ with self.pools.lock:
+ pool = self.pools.get(url)
+ if pool:
+ return pool
+
+ pool = UnixHTTPConnectionPool(
+ url, self.socket_path, self.timeout,
+ maxsize=self.max_pool_size
+ )
+ self.pools[url] = pool
+
+ return pool
+
+ def request_url(self, request, proxies):
+ # The select_proxy utility in requests errors out when the provided URL
+ # doesn't have a hostname, like is the case when using a UNIX socket.
+ # Since proxies are an irrelevant notion in the case of UNIX sockets
+ # anyway, we simply return the path URL directly.
+ # See also: https://github.com/docker/docker-py/issues/811
+ return request.path_url
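
UnixHTTPAdapter keys its pool cache by request URL but always dials the configured socket path, and request_url() returns the bare path so requests never needs a resolvable hostname. A minimal sketch; the http+docker:// scheme and the localhost placeholder are illustrative:

    import requests
    from ansible_collections.community.docker.plugins.module_utils._api.transport.unixconn import UnixHTTPAdapter

    session = requests.Session()
    session.mount('http+docker://', UnixHTTPAdapter('http+unix:///var/run/docker.sock'))
    resp = session.get('http+docker://localhost/version')   # hostname part is ignored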
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/types/daemon.py b/ansible_collections/community/docker/plugins/module_utils/_api/types/daemon.py
new file mode 100644
index 000000000..619644281
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/types/daemon.py
@@ -0,0 +1,83 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import socket
+
+from .._import_helper import urllib3
+
+from ..errors import DockerException
+
+
+class CancellableStream(object):
+ """
+ Stream wrapper for real-time events, logs, etc. from the server.
+
+ Example:
+ >>> events = client.events()
+ >>> for event in events:
+ ... print(event)
+ >>> # and cancel from another thread
+ >>> events.close()
+ """
+
+ def __init__(self, stream, response):
+ self._stream = stream
+ self._response = response
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ try:
+ return next(self._stream)
+ except urllib3.exceptions.ProtocolError:
+ raise StopIteration
+ except socket.error:
+ raise StopIteration
+
+ next = __next__
+
+ def close(self):
+ """
+ Closes the event streaming.
+ """
+
+ if not self._response.raw.closed:
+ # find the underlying socket object
+ # based on api.client._get_raw_response_socket
+
+ sock_fp = self._response.raw._fp.fp
+
+ if hasattr(sock_fp, 'raw'):
+ sock_raw = sock_fp.raw
+
+ if hasattr(sock_raw, 'sock'):
+ sock = sock_raw.sock
+
+ elif hasattr(sock_raw, '_sock'):
+ sock = sock_raw._sock
+
+ elif hasattr(sock_fp, 'channel'):
+ # We're working with a paramiko (SSH) channel, which doesn't
+ # support cancelable streams with the current implementation
+ raise DockerException(
+ 'Cancellable streams not supported for the SSH protocol'
+ )
+ else:
+ sock = sock_fp._sock
+
+ if hasattr(urllib3.contrib, 'pyopenssl') and isinstance(
+ sock, urllib3.contrib.pyopenssl.WrappedSocket):
+ sock = sock.socket
+
+ sock.shutdown(socket.SHUT_RDWR)
+ sock.close()
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/build.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/build.py
new file mode 100644
index 000000000..85704f943
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/build.py
@@ -0,0 +1,305 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import io
+import os
+import random
+import re
+import tarfile
+import tempfile
+
+from ansible.module_utils.six import PY3
+
+from . import fnmatch
+from ..constants import IS_WINDOWS_PLATFORM, WINDOWS_LONGPATH_PREFIX
+
+
+_SEP = re.compile('/|\\\\') if IS_WINDOWS_PLATFORM else re.compile('/')
+
+
+def tar(path, exclude=None, dockerfile=None, fileobj=None, gzip=False):
+ root = os.path.abspath(path)
+ exclude = exclude or []
+ dockerfile = dockerfile or (None, None)
+ extra_files = []
+ if dockerfile[1] is not None:
+ dockerignore_contents = '\n'.join(
+ (exclude or ['.dockerignore']) + [dockerfile[0]]
+ )
+ extra_files = [
+ ('.dockerignore', dockerignore_contents),
+ dockerfile,
+ ]
+ return create_archive(
+ files=sorted(exclude_paths(root, exclude, dockerfile=dockerfile[0])),
+ root=root, fileobj=fileobj, gzip=gzip, extra_files=extra_files
+ )
+
+
+def exclude_paths(root, patterns, dockerfile=None):
+ """
+ Given a root directory path and a list of .dockerignore patterns, return
+    the set of all paths (both regular files and directories) in the root
+ directory that do *not* match any of the patterns.
+
+ All paths returned are relative to the root.
+ """
+
+ if dockerfile is None:
+ dockerfile = 'Dockerfile'
+
+ patterns.append('!' + dockerfile)
+ pm = PatternMatcher(patterns)
+ return set(pm.walk(root))
+
+
+def build_file_list(root):
+ files = []
+ for dirname, dirnames, fnames in os.walk(root):
+ for filename in fnames + dirnames:
+ longpath = os.path.join(dirname, filename)
+ files.append(
+ longpath.replace(root, '', 1).lstrip('/')
+ )
+
+ return files
+
+
+def create_archive(root, files=None, fileobj=None, gzip=False,
+ extra_files=None):
+ extra_files = extra_files or []
+ if not fileobj:
+ fileobj = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w:gz' if gzip else 'w', fileobj=fileobj)
+ if files is None:
+ files = build_file_list(root)
+ extra_names = set(e[0] for e in extra_files)
+ for path in files:
+ if path in extra_names:
+ # Extra files override context files with the same name
+ continue
+ full_path = os.path.join(root, path)
+
+ i = t.gettarinfo(full_path, arcname=path)
+ if i is None:
+ # This happens when we encounter a socket file. We can safely
+ # ignore it and proceed.
+ continue
+
+ # Workaround https://bugs.python.org/issue32713
+ if i.mtime < 0 or i.mtime > 8**11 - 1:
+ i.mtime = int(i.mtime)
+
+ if IS_WINDOWS_PLATFORM:
+ # Windows doesn't keep track of the execute bit, so we make files
+ # and directories executable by default.
+ i.mode = i.mode & 0o755 | 0o111
+
+ if i.isfile():
+ try:
+ with open(full_path, 'rb') as f:
+ t.addfile(i, f)
+            except IOError:
+                raise IOError(
+                    'Cannot read file in context: {0}'.format(full_path)
+                )
+ else:
+ # Directories, FIFOs, symlinks... don't need to be read.
+ t.addfile(i, None)
+
+ for name, contents in extra_files:
+ info = tarfile.TarInfo(name)
+ contents_encoded = contents.encode('utf-8')
+ info.size = len(contents_encoded)
+ t.addfile(info, io.BytesIO(contents_encoded))
+
+ t.close()
+ fileobj.seek(0)
+ return fileobj
+
+
+def mkbuildcontext(dockerfile):
+ f = tempfile.NamedTemporaryFile()
+ t = tarfile.open(mode='w', fileobj=f)
+ if isinstance(dockerfile, io.StringIO):
+ dfinfo = tarfile.TarInfo('Dockerfile')
+ if PY3:
+ raise TypeError('Please use io.BytesIO to create in-memory '
+ 'Dockerfiles with Python 3')
+ else:
+ dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
+ elif isinstance(dockerfile, io.BytesIO):
+ dfinfo = tarfile.TarInfo('Dockerfile')
+ dfinfo.size = len(dockerfile.getvalue())
+ dockerfile.seek(0)
+ else:
+ dfinfo = t.gettarinfo(fileobj=dockerfile, arcname='Dockerfile')
+ t.addfile(dfinfo, dockerfile)
+ t.close()
+ f.seek(0)
+ return f
+
+
+def split_path(p):
+ return [pt for pt in re.split(_SEP, p) if pt and pt != '.']
+
+
+def normalize_slashes(p):
+ if IS_WINDOWS_PLATFORM:
+ return '/'.join(split_path(p))
+ return p
+
+
+def walk(root, patterns, default=True):
+ pm = PatternMatcher(patterns)
+ return pm.walk(root)
+
+
+# Heavily based on
+# https://github.com/moby/moby/blob/master/pkg/fileutils/fileutils.go
+class PatternMatcher(object):
+ def __init__(self, patterns):
+ self.patterns = list(filter(
+ lambda p: p.dirs, [Pattern(p) for p in patterns]
+ ))
+ self.patterns.append(Pattern('!.dockerignore'))
+
+ def matches(self, filepath):
+ matched = False
+ parent_path = os.path.dirname(filepath)
+ parent_path_dirs = split_path(parent_path)
+
+ for pattern in self.patterns:
+ negative = pattern.exclusion
+ match = pattern.match(filepath)
+ if not match and parent_path != '':
+ if len(pattern.dirs) <= len(parent_path_dirs):
+ match = pattern.match(
+ os.path.sep.join(parent_path_dirs[:len(pattern.dirs)])
+ )
+
+ if match:
+ matched = not negative
+
+ return matched
+
+ def walk(self, root):
+ def rec_walk(current_dir):
+ for f in os.listdir(current_dir):
+ fpath = os.path.join(
+ os.path.relpath(current_dir, root), f
+ )
+ if fpath.startswith('.' + os.path.sep):
+ fpath = fpath[2:]
+ match = self.matches(fpath)
+ if not match:
+ yield fpath
+
+ cur = os.path.join(root, fpath)
+ if not os.path.isdir(cur) or os.path.islink(cur):
+ continue
+
+ if match:
+ # If we want to skip this file and it's a directory
+ # then we should first check to see if there's an
+ # excludes pattern (e.g. !dir/file) that starts with this
+ # dir. If so then we can't skip this dir.
+ skip = True
+
+ for pat in self.patterns:
+ if not pat.exclusion:
+ continue
+ if pat.cleaned_pattern.startswith(
+ normalize_slashes(fpath)):
+ skip = False
+ break
+ if skip:
+ continue
+ for sub in rec_walk(cur):
+ yield sub
+
+ return rec_walk(root)
+
+
+class Pattern(object):
+ def __init__(self, pattern_str):
+ self.exclusion = False
+ if pattern_str.startswith('!'):
+ self.exclusion = True
+ pattern_str = pattern_str[1:]
+
+ self.dirs = self.normalize(pattern_str)
+ self.cleaned_pattern = '/'.join(self.dirs)
+
+ @classmethod
+ def normalize(cls, p):
+
+ # Remove trailing spaces
+ p = p.strip()
+
+ # Leading and trailing slashes are not relevant. Yes,
+ # "foo.py/" must exclude the "foo.py" regular file. "."
+ # components are not relevant either, even if the whole
+ # pattern is only ".", as the Docker reference states: "For
+ # historical reasons, the pattern . is ignored."
+ # ".." component must be cleared with the potential previous
+ # component, regardless of whether it exists: "A preprocessing
+ # step [...] eliminates . and .. elements using Go's
+ # filepath.".
+ i = 0
+ split = split_path(p)
+ while i < len(split):
+ if split[i] == '..':
+ del split[i]
+ if i > 0:
+ del split[i - 1]
+ i -= 1
+ else:
+ i += 1
+ return split
+
+ def match(self, filepath):
+ return fnmatch.fnmatch(normalize_slashes(filepath), self.cleaned_pattern)
+
+
+def process_dockerfile(dockerfile, path):
+ if not dockerfile:
+ return (None, None)
+
+ abs_dockerfile = dockerfile
+ if not os.path.isabs(dockerfile):
+ abs_dockerfile = os.path.join(path, dockerfile)
+ if IS_WINDOWS_PLATFORM and path.startswith(
+ WINDOWS_LONGPATH_PREFIX):
+ abs_dockerfile = '{0}{1}'.format(
+ WINDOWS_LONGPATH_PREFIX,
+ os.path.normpath(
+ abs_dockerfile[len(WINDOWS_LONGPATH_PREFIX):]
+ )
+ )
+ if (os.path.splitdrive(path)[0] != os.path.splitdrive(abs_dockerfile)[0] or
+ os.path.relpath(abs_dockerfile, path).startswith('..')):
+ # Dockerfile not in context - read data to insert into tar later
+ with open(abs_dockerfile) as df:
+ return (
+ '.dockerfile.{random:x}'.format(random=random.getrandbits(160)),
+ df.read()
+ )
+
+ # Dockerfile is inside the context - return path relative to context root
+ if dockerfile == abs_dockerfile:
+ # Only calculate relpath if necessary to avoid errors
+ # on Windows client -> Linux Docker
+ # see https://github.com/docker/compose/issues/5969
+ dockerfile = os.path.relpath(abs_dockerfile, path)
+ return (dockerfile, None)
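
Taken together, these helpers turn a build directory plus .dockerignore-style patterns into the tar stream the daemon expects. A small sketch of the intended behavior under the pattern rules above ('/project' is a placeholder path):

    from ansible_collections.community.docker.plugins.module_utils._api.utils.build import exclude_paths, tar

    # Everything except *.pyc is kept; the negated pattern re-includes one file.
    paths = exclude_paths('/project', ['*.pyc', '!keep/special.pyc'])

    # Build a gzipped context; returns a NamedTemporaryFile seeked back to 0.
    ctx = tar('/project', exclude=['*.pyc'], gzip=True)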
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/config.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/config.py
new file mode 100644
index 000000000..eed538b47
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/config.py
@@ -0,0 +1,78 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import logging
+import os
+
+from ..constants import IS_WINDOWS_PLATFORM
+
+DOCKER_CONFIG_FILENAME = os.path.join('.docker', 'config.json')
+LEGACY_DOCKER_CONFIG_FILENAME = '.dockercfg'
+
+log = logging.getLogger(__name__)
+
+
+def find_config_file(config_path=None):
+ paths = list(filter(None, [
+ config_path, # 1
+ config_path_from_environment(), # 2
+ os.path.join(home_dir(), DOCKER_CONFIG_FILENAME), # 3
+ os.path.join(home_dir(), LEGACY_DOCKER_CONFIG_FILENAME), # 4
+ ]))
+
+ log.debug("Trying paths: %s", repr(paths))
+
+ for path in paths:
+ if os.path.exists(path):
+ log.debug("Found file at path: %s", path)
+ return path
+
+ log.debug("No config file found")
+
+ return None
+
+
+def config_path_from_environment():
+ config_dir = os.environ.get('DOCKER_CONFIG')
+ if not config_dir:
+ return None
+ return os.path.join(config_dir, os.path.basename(DOCKER_CONFIG_FILENAME))
+
+
+def home_dir():
+ """
+ Get the user's home directory, using the same logic as the Docker Engine
+ client - use %USERPROFILE% on Windows, $HOME/getuid on POSIX.
+ """
+ if IS_WINDOWS_PLATFORM:
+ return os.environ.get('USERPROFILE', '')
+ else:
+ return os.path.expanduser('~')
+
+
+def load_general_config(config_path=None):
+ config_file = find_config_file(config_path)
+
+ if not config_file:
+ return {}
+
+ try:
+ with open(config_file) as f:
+ return json.load(f)
+ except (IOError, ValueError) as e:
+ # In the case of a legacy `.dockercfg` file, we won't
+ # be able to load any JSON data.
+ log.debug(e)
+
+ log.debug("All parsing attempts failed - returning empty config")
+ return {}
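
Lookup precedence is explicit: an argument wins, then $DOCKER_CONFIG, then ~/.docker/config.json, then the legacy ~/.dockercfg. A minimal sketch ('/tmp/docker-conf' is a placeholder):

    import os
    from ansible_collections.community.docker.plugins.module_utils._api.utils.config import load_general_config

    os.environ['DOCKER_CONFIG'] = '/tmp/docker-conf'   # consulted before the home-dir defaults
    cfg = load_general_config()                        # {} when nothing parseable is found
    proxies = cfg.get('proxies', {})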
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/decorators.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/decorators.py
new file mode 100644
index 000000000..ec2d258a1
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/decorators.py
@@ -0,0 +1,59 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import functools
+
+from .. import errors
+from . import utils
+
+
+def check_resource(resource_name):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapped(self, resource_id=None, *args, **kwargs):
+ if resource_id is None and kwargs.get(resource_name):
+ resource_id = kwargs.pop(resource_name)
+ if isinstance(resource_id, dict):
+ resource_id = resource_id.get('Id', resource_id.get('ID'))
+ if not resource_id:
+ raise errors.NullResource(
+ 'Resource ID was not provided'
+ )
+ return f(self, resource_id, *args, **kwargs)
+ return wrapped
+ return decorator
+
+
+def minimum_version(version):
+ def decorator(f):
+ @functools.wraps(f)
+ def wrapper(self, *args, **kwargs):
+ if utils.version_lt(self._version, version):
+ raise errors.InvalidVersion(
+ '{0} is not available for version < {1}'.format(
+ f.__name__, version
+ )
+ )
+ return f(self, *args, **kwargs)
+ return wrapper
+ return decorator
+
+
+def update_headers(f):
+ def inner(self, *args, **kwargs):
+ if 'HttpHeaders' in self._general_configs:
+ if not kwargs.get('headers'):
+ kwargs['headers'] = self._general_configs['HttpHeaders']
+ else:
+ kwargs['headers'].update(self._general_configs['HttpHeaders'])
+ return f(self, *args, **kwargs)
+ return inner
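
These decorators are designed to stack on API client methods: check_resource normalizes the resource ID (accepting dicts carrying Id/ID keys), and minimum_version guards calls against older daemon APIs via self._version. A sketch of how they compose; ExampleClient and its method body are illustrative, not from this commit:

    from ansible_collections.community.docker.plugins.module_utils._api.utils.decorators import (
        check_resource, minimum_version)

    class ExampleClient(object):
        _version = '1.25'

        @minimum_version('1.21')
        @check_resource('container')
        def inspect(self, container):
            return container        # placeholder for the real API call

    ExampleClient().inspect({'Id': 'abc123'})   # the dict resolves to 'abc123'
    ExampleClient().inspect(None)               # raises errors.NullResource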
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/fnmatch.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/fnmatch.py
new file mode 100644
index 000000000..f6e77a5f8
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/fnmatch.py
@@ -0,0 +1,127 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+"""Filename matching with shell patterns.
+
+fnmatch(FILENAME, PATTERN) matches according to the local convention.
+fnmatchcase(FILENAME, PATTERN) always takes case in account.
+
+The functions operate by translating the pattern into a regular
+expression. They cache the compiled regular expressions for speed.
+
+The function translate(PATTERN) returns a regular expression
+corresponding to PATTERN. (It does not compile it.)
+"""
+
+import re
+
+__all__ = ["fnmatch", "fnmatchcase", "translate"]
+
+_cache = {}
+_MAXCACHE = 100
+
+
+def _purge():
+ """Clear the pattern cache"""
+ _cache.clear()
+
+
+def fnmatch(name, pat):
+ """Test whether FILENAME matches PATTERN.
+
+ Patterns are Unix shell style:
+
+ * matches everything
+ ? matches any single character
+ [seq] matches any character in seq
+ [!seq] matches any char not in seq
+
+ An initial period in FILENAME is not special.
+ Both FILENAME and PATTERN are first case-normalized
+ if the operating system requires it.
+ If you don't want this, use fnmatchcase(FILENAME, PATTERN).
+ """
+
+ name = name.lower()
+ pat = pat.lower()
+ return fnmatchcase(name, pat)
+
+
+def fnmatchcase(name, pat):
+ """Test whether FILENAME matches PATTERN, including case.
+ This is a version of fnmatch() which doesn't case-normalize
+ its arguments.
+ """
+
+ try:
+ re_pat = _cache[pat]
+ except KeyError:
+ res = translate(pat)
+ if len(_cache) >= _MAXCACHE:
+ _cache.clear()
+ _cache[pat] = re_pat = re.compile(res)
+ return re_pat.match(name) is not None
+
+
+def translate(pat):
+ """Translate a shell PATTERN to a regular expression.
+
+ There is no way to quote meta-characters.
+ """
+ i, n = 0, len(pat)
+ res = '^'
+ while i < n:
+ c = pat[i]
+ i = i + 1
+ if c == '*':
+ if i < n and pat[i] == '*':
+ # is some flavor of "**"
+ i = i + 1
+ # Treat **/ as ** so eat the "/"
+ if i < n and pat[i] == '/':
+ i = i + 1
+ if i >= n:
+ # is "**EOF" - to align with .gitignore just accept all
+ res = res + '.*'
+ else:
+ # is "**"
+ # Note that this allows for any # of /'s (even 0) because
+ # the .* will eat everything, even /'s
+ res = res + '(.*/)?'
+ else:
+ # is "*" so map it to anything but "/"
+ res = res + '[^/]*'
+ elif c == '?':
+ # "?" is any char except "/"
+ res = res + '[^/]'
+ elif c == '[':
+ j = i
+ if j < n and pat[j] == '!':
+ j = j + 1
+ if j < n and pat[j] == ']':
+ j = j + 1
+ while j < n and pat[j] != ']':
+ j = j + 1
+ if j >= n:
+ res = res + '\\['
+ else:
+ stuff = pat[i:j].replace('\\', '\\\\')
+ i = j + 1
+ if stuff[0] == '!':
+ stuff = '^' + stuff[1:]
+ elif stuff[0] == '^':
+ stuff = '\\' + stuff
+ res = '%s[%s]' % (res, stuff)
+ else:
+ res = res + re.escape(c)
+
+ return res + '$'
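
The dialect differs from the stdlib fnmatch: * and ? never cross a path separator, while ** (modeled on .gitignore) spans any number of segments. A few expected results under the translation above:

    from ansible_collections.community.docker.plugins.module_utils._api.utils.fnmatch import fnmatch, translate

    fnmatch('foo/bar/baz.py', '**/*.py')   # True  - ** spans directories
    fnmatch('foo/bar', 'foo/*')            # True
    fnmatch('foo/bar/baz', 'foo/*')        # False - * stops at '/'
    translate('*.py')                      # '^[^/]*\\.py$'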
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/json_stream.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/json_stream.py
new file mode 100644
index 000000000..f3a74bac3
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/json_stream.py
@@ -0,0 +1,89 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import json.decoder
+
+from ansible.module_utils.six import text_type
+
+from ..errors import StreamParseError
+
+
+json_decoder = json.JSONDecoder()
+
+
+def stream_as_text(stream):
+ """
+ Given a stream of bytes or text, if any of the items in the stream
+ are bytes convert them to text.
+ This function can be removed once we return text streams
+ instead of byte streams.
+ """
+ for data in stream:
+ if not isinstance(data, text_type):
+ data = data.decode('utf-8', 'replace')
+ yield data
+
+
+def json_splitter(buffer):
+ """Attempt to parse a json object from a buffer. If there is at least one
+    object, return it and the rest of the buffer; otherwise return None.
+ """
+ buffer = buffer.strip()
+ try:
+ obj, index = json_decoder.raw_decode(buffer)
+ rest = buffer[json.decoder.WHITESPACE.match(buffer, index).end():]
+ return obj, rest
+ except ValueError:
+ return None
+
+
+def json_stream(stream):
+ """Given a stream of text, return a stream of json objects.
+ This handles streams which are inconsistently buffered (some entries may
+ be newline delimited, and others are not).
+ """
+ return split_buffer(stream, json_splitter, json_decoder.decode)
+
+
+def line_splitter(buffer, separator=u'\n'):
+ index = buffer.find(text_type(separator))
+ if index == -1:
+ return None
+ return buffer[:index + 1], buffer[index + 1:]
+
+
+def split_buffer(stream, splitter=None, decoder=lambda a: a):
+ """Given a generator which yields strings and a splitter function,
+ joins all input, splits on the separator and yields each chunk.
+ Unlike string.split(), each chunk includes the trailing
+ separator, except for the last one if none was found on the end
+ of the input.
+ """
+ splitter = splitter or line_splitter
+ buffered = text_type('')
+
+ for data in stream_as_text(stream):
+ buffered += data
+ while True:
+ buffer_split = splitter(buffered)
+ if buffer_split is None:
+ break
+
+ item, buffered = buffer_split
+ yield item
+
+ if buffered:
+ try:
+ yield decoder(buffered)
+ except Exception as e:
+ raise StreamParseError(e)
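
The point of json_splitter and split_buffer is resilience to arbitrary chunking: a JSON object sliced across two network reads is reassembled before decoding. A small sketch of the expected behavior:

    from ansible_collections.community.docker.plugins.module_utils._api.utils.json_stream import json_stream

    chunks = ['{"stream": "Step 1"}\n{"str', 'eam": "Step 2"}\n']
    list(json_stream(chunks))
    # -> [{'stream': 'Step 1'}, {'stream': 'Step 2'}]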
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/ports.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/ports.py
new file mode 100644
index 000000000..194aaa7aa
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/ports.py
@@ -0,0 +1,95 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import re
+
+PORT_SPEC = re.compile(
+ "^" # Match full string
+ "(" # External part
+ r"(\[?(?P<host>[a-fA-F\d.:]+)\]?:)?" # Address
+ r"(?P<ext>[\d]*)(-(?P<ext_end>[\d]+))?:" # External range
+ ")?"
+ r"(?P<int>[\d]+)(-(?P<int_end>[\d]+))?" # Internal range
+ "(?P<proto>/(udp|tcp|sctp))?" # Protocol
+ "$" # Match full string
+)
+
+
+def add_port_mapping(port_bindings, internal_port, external):
+ if internal_port in port_bindings:
+ port_bindings[internal_port].append(external)
+ else:
+ port_bindings[internal_port] = [external]
+
+
+def add_port(port_bindings, internal_port_range, external_range):
+ if external_range is None:
+ for internal_port in internal_port_range:
+ add_port_mapping(port_bindings, internal_port, None)
+ else:
+ ports = zip(internal_port_range, external_range)
+ for internal_port, external_port in ports:
+ add_port_mapping(port_bindings, internal_port, external_port)
+
+
+def build_port_bindings(ports):
+ port_bindings = {}
+ for port in ports:
+ internal_port_range, external_range = split_port(port)
+ add_port(port_bindings, internal_port_range, external_range)
+ return port_bindings
+
+
+def _raise_invalid_port(port):
+ raise ValueError('Invalid port "%s", should be '
+ '[[remote_ip:]remote_port[-remote_port]:]'
+ 'port[/protocol]' % port)
+
+
+def port_range(start, end, proto, randomly_available_port=False):
+ if not start:
+ return start
+ if not end:
+ return [start + proto]
+ if randomly_available_port:
+ return ['{0}-{1}'.format(start, end) + proto]
+ return [str(port) + proto for port in range(int(start), int(end) + 1)]
+
+
+def split_port(port):
+ if hasattr(port, 'legacy_repr'):
+ # This is the worst hack, but it prevents a bug in Compose 1.14.0
+ # https://github.com/docker/docker-py/issues/1668
+ # TODO: remove once fixed in Compose stable
+ port = port.legacy_repr()
+ port = str(port)
+ match = PORT_SPEC.match(port)
+ if match is None:
+ _raise_invalid_port(port)
+ parts = match.groupdict()
+
+ host = parts['host']
+ proto = parts['proto'] or ''
+ internal = port_range(parts['int'], parts['int_end'], proto)
+ external = port_range(
+ parts['ext'], parts['ext_end'], '', len(internal) == 1)
+
+ if host is None:
+ if external is not None and len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, external
+ else:
+ if not external:
+ external = [None] * len(internal)
+ elif len(internal) != len(external):
+ raise ValueError('Port ranges don\'t match in length')
+ return internal, [(host, ext_port) for ext_port in external]
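
split_port() accepts the full [[ip:]external[-external]:]internal[-internal][/proto] grammar and expands ranges eagerly. Expected results under the regex above:

    from ansible_collections.community.docker.plugins.module_utils._api.utils.ports import split_port

    split_port('127.0.0.1:8080:80/tcp')
    # -> (['80/tcp'], [('127.0.0.1', '8080')])
    split_port('8000-8002:9000-9002')
    # -> (['9000', '9001', '9002'], ['8000', '8001', '8002'])
    split_port('9000')
    # -> (['9000'], None)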
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/proxy.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/proxy.py
new file mode 100644
index 000000000..ed20ff53a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/proxy.py
@@ -0,0 +1,85 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+from .utils import format_environment
+
+
+class ProxyConfig(dict):
+ '''
+ Hold the client's proxy configuration
+ '''
+ @property
+ def http(self):
+ return self.get('http')
+
+ @property
+ def https(self):
+ return self.get('https')
+
+ @property
+ def ftp(self):
+ return self.get('ftp')
+
+ @property
+ def no_proxy(self):
+ return self.get('no_proxy')
+
+ @staticmethod
+ def from_dict(config):
+ '''
+ Instantiate a new ProxyConfig from a dictionary that represents a
+ client configuration, as described in `the documentation`_.
+
+ .. _the documentation:
+ https://docs.docker.com/network/proxy/#configure-the-docker-client
+ '''
+ return ProxyConfig(
+ http=config.get('httpProxy'),
+ https=config.get('httpsProxy'),
+ ftp=config.get('ftpProxy'),
+ no_proxy=config.get('noProxy'),
+ )
+
+ def get_environment(self):
+ '''
+ Return a dictionary representing the environment variables used to
+ set the proxy settings.
+ '''
+ env = {}
+ if self.http:
+ env['http_proxy'] = env['HTTP_PROXY'] = self.http
+ if self.https:
+ env['https_proxy'] = env['HTTPS_PROXY'] = self.https
+ if self.ftp:
+ env['ftp_proxy'] = env['FTP_PROXY'] = self.ftp
+ if self.no_proxy:
+ env['no_proxy'] = env['NO_PROXY'] = self.no_proxy
+ return env
+
+ def inject_proxy_environment(self, environment):
+ '''
+ Given a list of strings representing environment variables, prepend the
+ environment variables corresponding to the proxy settings.
+ '''
+ if not self:
+ return environment
+
+ proxy_env = format_environment(self.get_environment())
+ if not environment:
+ return proxy_env
+ # It is important to prepend our variables, because we want the
+ # variables defined in "environment" to take precedence.
+ return proxy_env + environment
+
+ def __str__(self):
+ return 'ProxyConfig(http={0}, https={1}, ftp={2}, no_proxy={3})'.format(
+ self.http, self.https, self.ftp, self.no_proxy)
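
from_dict() maps the client-config keys (httpProxy, httpsProxy, ftpProxy, noProxy) onto the names this class exposes, and inject_proxy_environment() prepends the derived variables so caller-supplied ones win. A sketch of the round trip; format_environment lives in utils.py in this same patch, and the list ordering shown assumes a modern CPython dict:

    from ansible_collections.community.docker.plugins.module_utils._api.utils.proxy import ProxyConfig

    cfg = ProxyConfig.from_dict({'httpProxy': 'http://proxy:3128', 'noProxy': 'localhost'})
    cfg.inject_proxy_environment(['FOO=bar'])
    # -> ['http_proxy=http://proxy:3128', 'HTTP_PROXY=http://proxy:3128',
    #     'no_proxy=localhost', 'NO_PROXY=localhost', 'FOO=bar']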
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py
new file mode 100644
index 000000000..9193ce30e
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py
@@ -0,0 +1,200 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import errno
+import os
+import select
+import socket as pysocket
+import struct
+import sys
+
+from ansible.module_utils.six import PY3, binary_type
+
+from ..transport.npipesocket import NpipeSocket
+
+
+STDOUT = 1
+STDERR = 2
+
+
+class SocketError(Exception):
+ pass
+
+
+# NpipeSockets have their own error types
+# pywintypes.error: (109, 'ReadFile', 'The pipe has been ended.')
+NPIPE_ENDED = 109
+
+
+def read(socket, n=4096):
+ """
+ Reads at most n bytes from socket
+ """
+
+ recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK)
+
+ if PY3 and not isinstance(socket, NpipeSocket):
+ if sys.platform == 'win32':
+ # Limited to 1024
+ select.select([socket], [], [])
+ else:
+ poll = select.poll()
+ poll.register(socket, select.POLLIN | select.POLLPRI)
+ poll.poll()
+
+ try:
+ if hasattr(socket, 'recv'):
+ return socket.recv(n)
+ if PY3 and isinstance(socket, getattr(pysocket, 'SocketIO')):
+ return socket.read(n)
+ return os.read(socket.fileno(), n)
+ except EnvironmentError as e:
+ if e.errno not in recoverable_errors:
+ raise
+ except Exception as e:
+ is_pipe_ended = (isinstance(socket, NpipeSocket) and
+ len(e.args) > 0 and
+ e.args[0] == NPIPE_ENDED)
+ if is_pipe_ended:
+ # npipes don't support duplex sockets, so we interpret
+ # a PIPE_ENDED error as a close operation (0-length read).
+ return ''
+ raise
+
+
+def read_exactly(socket, n):
+ """
+ Reads exactly n bytes from socket
+ Raises SocketError if there isn't enough data
+ """
+ data = binary_type()
+ while len(data) < n:
+ next_data = read(socket, n - len(data))
+ if not next_data:
+ raise SocketError("Unexpected EOF")
+ data += next_data
+ return data
+
+
+def next_frame_header(socket):
+ """
+ Returns the stream and size of the next frame of data waiting to be read
+ from socket, according to the protocol defined here:
+
+ https://docs.docker.com/engine/api/v1.24/#attach-to-a-container
+ """
+ try:
+ data = read_exactly(socket, 8)
+ except SocketError:
+ return (-1, -1)
+
+ stream, actual = struct.unpack('>BxxxL', data)
+ return (stream, actual)
+
+
+def frames_iter(socket, tty):
+ """
+ Return a generator of frames read from socket. A frame is a tuple where
+ the first item is the stream number and the second item is a chunk of data.
+
+ If the tty setting is enabled, the streams are multiplexed into the stdout
+ stream.
+ """
+ if tty:
+ return ((STDOUT, frame) for frame in frames_iter_tty(socket))
+ else:
+ return frames_iter_no_tty(socket)
+
+
+def frames_iter_no_tty(socket):
+ """
+ Returns a generator of data read from the socket when the tty setting is
+ not enabled.
+ """
+ while True:
+ (stream, n) = next_frame_header(socket)
+ if n < 0:
+ break
+ while n > 0:
+ result = read(socket, n)
+ if result is None:
+ continue
+ data_length = len(result)
+ if data_length == 0:
+ # We have reached EOF
+ return
+ n -= data_length
+ yield (stream, result)
+
+
+def frames_iter_tty(socket):
+ """
+ Return a generator of data read from the socket when the tty setting is
+ enabled.
+ """
+ while True:
+ result = read(socket)
+ if len(result) == 0:
+ # We have reached EOF
+ return
+ yield result
+
+
+def consume_socket_output(frames, demux=False):
+ """
+ Iterate through frames read from the socket and return the result.
+
+ Args:
+
+ demux (bool):
+ If False, stdout and stderr are multiplexed, and the result is the
+ concatenation of all the frames. If True, the streams are
+ demultiplexed, and the result is a 2-tuple where each item is the
+ concatenation of frames belonging to the same stream.
+ """
+ if demux is False:
+        # If the streams are multiplexed, the generator returns strings
+        # that we just need to concatenate.
+ return binary_type().join(frames)
+
+ # If the streams are demultiplexed, the generator yields tuples
+ # (stdout, stderr)
+ out = [None, None]
+ for frame in frames:
+ # It is guaranteed that for each frame, one and only one stream
+ # is not None.
+ if frame == (None, None):
+            raise AssertionError('frame must not be (None, None), but got %s' % (frame, ))
+ if frame[0] is not None:
+ if out[0] is None:
+ out[0] = frame[0]
+ else:
+ out[0] += frame[0]
+ else:
+ if out[1] is None:
+ out[1] = frame[1]
+ else:
+ out[1] += frame[1]
+ return tuple(out)
+
+
+def demux_adaptor(stream_id, data):
+ """
+ Utility to demultiplex stdout and stderr when reading frames from the
+ socket.
+ """
+ if stream_id == STDOUT:
+ return (data, None)
+ elif stream_id == STDERR:
+ return (None, data)
+ else:
+ raise ValueError('{0} is not a valid stream'.format(stream_id))
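
These helpers implement the 8-byte-header framing of the attach/exec protocol. A sketch of how a consumer typically wires them together, with sock standing in for any connected socket-like object:

    from ansible_collections.community.docker.plugins.module_utils._api.utils.socket import (
        consume_socket_output, demux_adaptor, frames_iter)

    def collect_exec_output(sock):
        frames = frames_iter(sock, tty=False)                  # (stream_id, data) tuples
        frames = (demux_adaptor(*frame) for frame in frames)   # -> (stdout, stderr) tuples
        return consume_socket_output(frames, demux=True)       # concatenated per stream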
diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py
new file mode 100644
index 000000000..910b0dc35
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py
@@ -0,0 +1,524 @@
+# -*- coding: utf-8 -*-
+# This code is part of the Ansible collection community.docker, but is an independent component.
+# This particular file, and this file only, is based on the Docker SDK for Python (https://github.com/docker/docker-py/)
+#
+# Copyright (c) 2016-2022 Docker, Inc.
+#
+# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection)
+# SPDX-License-Identifier: Apache-2.0
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import collections
+import json
+import os
+import os.path
+import shlex
+import string
+from datetime import datetime
+from ansible_collections.community.docker.plugins.module_utils.version import StrictVersion
+
+from ansible.module_utils.six import PY2, PY3, binary_type, integer_types, iteritems, string_types, text_type
+
+from .. import errors
+from ..constants import DEFAULT_HTTP_HOST
+from ..constants import DEFAULT_UNIX_SOCKET
+from ..constants import DEFAULT_NPIPE
+from ..constants import BYTE_UNITS
+from ..tls import TLSConfig
+
+if PY2:
+ from urlparse import urlparse, urlunparse
+else:
+ from urllib.parse import urlparse, urlunparse
+
+
+URLComponents = collections.namedtuple(
+ 'URLComponents',
+ 'scheme netloc url params query fragment',
+)
+
+
+def create_ipam_pool(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_pool has been removed. Please use a '
+ 'docker.types.IPAMPool object instead.'
+ )
+
+
+def create_ipam_config(*args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_ipam_config has been removed. Please use a '
+ 'docker.types.IPAMConfig object instead.'
+ )
+
+
+def decode_json_header(header):
+ data = base64.b64decode(header)
+ if PY3:
+ data = data.decode('utf-8')
+ return json.loads(data)
+
+
+def compare_version(v1, v2):
+ """Compare docker versions
+
+ >>> v1 = '1.9'
+ >>> v2 = '1.10'
+ >>> compare_version(v1, v2)
+ 1
+ >>> compare_version(v2, v1)
+ -1
+ >>> compare_version(v2, v2)
+ 0
+ """
+ s1 = StrictVersion(v1)
+ s2 = StrictVersion(v2)
+ if s1 == s2:
+ return 0
+ elif s1 > s2:
+ return -1
+ else:
+ return 1
+
+
+def version_lt(v1, v2):
+ return compare_version(v1, v2) > 0
+
+
+def version_gte(v1, v2):
+ return not version_lt(v1, v2)
+
+
+def _convert_port_binding(binding):
+ result = {'HostIp': '', 'HostPort': ''}
+ if isinstance(binding, tuple):
+ if len(binding) == 2:
+ result['HostPort'] = binding[1]
+ result['HostIp'] = binding[0]
+ elif isinstance(binding[0], string_types):
+ result['HostIp'] = binding[0]
+ else:
+ result['HostPort'] = binding[0]
+ elif isinstance(binding, dict):
+ if 'HostPort' in binding:
+ result['HostPort'] = binding['HostPort']
+ if 'HostIp' in binding:
+ result['HostIp'] = binding['HostIp']
+ else:
+ raise ValueError(binding)
+ else:
+ result['HostPort'] = binding
+
+ if result['HostPort'] is None:
+ result['HostPort'] = ''
+ else:
+ result['HostPort'] = str(result['HostPort'])
+
+ return result
+
+
+def convert_port_bindings(port_bindings):
+ result = {}
+ for k, v in iteritems(port_bindings):
+ key = str(k)
+ if '/' not in key:
+ key += '/tcp'
+ if isinstance(v, list):
+ result[key] = [_convert_port_binding(binding) for binding in v]
+ else:
+ result[key] = [_convert_port_binding(v)]
+ return result
+
+
+def convert_volume_binds(binds):
+ if isinstance(binds, list):
+ return binds
+
+ result = []
+ for k, v in binds.items():
+ if isinstance(k, binary_type):
+ k = k.decode('utf-8')
+
+ if isinstance(v, dict):
+ if 'ro' in v and 'mode' in v:
+ raise ValueError(
+ 'Binding cannot contain both "ro" and "mode": {0}'
+ .format(repr(v))
+ )
+
+ bind = v['bind']
+ if isinstance(bind, binary_type):
+ bind = bind.decode('utf-8')
+
+ if 'ro' in v:
+ mode = 'ro' if v['ro'] else 'rw'
+ elif 'mode' in v:
+ mode = v['mode']
+ else:
+ mode = 'rw'
+
+ result.append(
+ text_type('{0}:{1}:{2}').format(k, bind, mode)
+ )
+ else:
+ if isinstance(v, binary_type):
+ v = v.decode('utf-8')
+ result.append(
+ text_type('{0}:{1}:rw').format(k, v)
+ )
+ return result
+
+
+def convert_tmpfs_mounts(tmpfs):
+ if isinstance(tmpfs, dict):
+ return tmpfs
+
+ if not isinstance(tmpfs, list):
+ raise ValueError(
+ 'Expected tmpfs value to be either a list or a dict, found: {0}'
+ .format(type(tmpfs).__name__)
+ )
+
+ result = {}
+ for mount in tmpfs:
+ if isinstance(mount, string_types):
+ if ":" in mount:
+ name, options = mount.split(":", 1)
+ else:
+ name = mount
+ options = ""
+
+ else:
+ raise ValueError(
+ "Expected item in tmpfs list to be a string, found: {0}"
+ .format(type(mount).__name__)
+ )
+
+ result[name] = options
+ return result
+
+
+def convert_service_networks(networks):
+ if not networks:
+ return networks
+ if not isinstance(networks, list):
+ raise TypeError('networks parameter must be a list.')
+
+ result = []
+ for n in networks:
+ if isinstance(n, string_types):
+ n = {'Target': n}
+ result.append(n)
+ return result
+
+
+def parse_repository_tag(repo_name):
+ parts = repo_name.rsplit('@', 1)
+ if len(parts) == 2:
+ return tuple(parts)
+ parts = repo_name.rsplit(':', 1)
+ if len(parts) == 2 and '/' not in parts[1]:
+ return tuple(parts)
+ return repo_name, None
+
+
+def parse_host(addr, is_win32=False, tls=False):
+ # Sensible defaults
+ if not addr and is_win32:
+ return DEFAULT_NPIPE
+ if not addr or addr.strip() == 'unix://':
+ return DEFAULT_UNIX_SOCKET
+
+ addr = addr.strip()
+
+ parsed_url = urlparse(addr)
+ proto = parsed_url.scheme
+ if not proto or any(x not in string.ascii_letters + '+' for x in proto):
+ # https://bugs.python.org/issue754016
+ parsed_url = urlparse('//' + addr, 'tcp')
+ proto = 'tcp'
+
+ if proto == 'fd':
+ raise errors.DockerException('fd protocol is not implemented')
+
+ # These protos are valid aliases for our library but not for the
+ # official spec
+ if proto == 'http' or proto == 'https':
+ tls = proto == 'https'
+ proto = 'tcp'
+ elif proto == 'http+unix':
+ proto = 'unix'
+
+ if proto not in ('tcp', 'unix', 'npipe', 'ssh'):
+ raise errors.DockerException(
+ "Invalid bind address protocol: {0}".format(addr)
+ )
+
+ if proto == 'tcp' and not parsed_url.netloc:
+ # "tcp://" is exceptionally disallowed by convention;
+ # omitting a hostname for other protocols is fine
+ raise errors.DockerException(
+ 'Invalid bind address format: {0}'.format(addr)
+ )
+
+ if any([
+ parsed_url.params, parsed_url.query, parsed_url.fragment,
+ parsed_url.password
+ ]):
+ raise errors.DockerException(
+ 'Invalid bind address format: {0}'.format(addr)
+ )
+
+ if parsed_url.path and proto == 'ssh':
+ raise errors.DockerException(
+ 'Invalid bind address format: no path allowed for this protocol:'
+ ' {0}'.format(addr)
+ )
+ else:
+ path = parsed_url.path
+ if proto == 'unix' and parsed_url.hostname is not None:
+ # For legacy reasons, we consider unix://path
+ # to be valid and equivalent to unix:///path
+ path = '/'.join((parsed_url.hostname, path))
+
+ netloc = parsed_url.netloc
+ if proto in ('tcp', 'ssh'):
+ port = parsed_url.port or 0
+ if port <= 0:
+ if proto != 'ssh':
+ raise errors.DockerException(
+ 'Invalid bind address format: port is required:'
+ ' {0}'.format(addr)
+ )
+ port = 22
+ netloc = '{0}:{1}'.format(parsed_url.netloc, port)
+
+ if not parsed_url.hostname:
+ netloc = '{0}:{1}'.format(DEFAULT_HTTP_HOST, port)
+
+ # Rewrite schemes to fit library internals (requests adapters)
+ if proto == 'tcp':
+ proto = 'http{0}'.format('s' if tls else '')
+ elif proto == 'unix':
+ proto = 'http+unix'
+
+ if proto in ('http+unix', 'npipe'):
+ return "{0}://{1}".format(proto, path).rstrip('/')
+ return urlunparse(URLComponents(
+ scheme=proto,
+ netloc=netloc,
+ url=path,
+ params='',
+ query='',
+ fragment='',
+ )).rstrip('/')
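+
+# Illustrative examples (the addresses are made-up; the unix default comes from
+# DEFAULT_UNIX_SOCKET in the constants module):
+#   parse_host(None)                             -> DEFAULT_UNIX_SOCKET
+#   parse_host('tcp://127.0.0.1:2376', tls=True) -> 'https://127.0.0.1:2376'
+#   parse_host('ssh://user@host')                -> 'ssh://user@host:22'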
+
+
+def parse_devices(devices):
+ device_list = []
+ for device in devices:
+ if isinstance(device, dict):
+ device_list.append(device)
+ continue
+ if not isinstance(device, string_types):
+ raise errors.DockerException(
+ 'Invalid device type {0}'.format(type(device))
+ )
+ device_mapping = device.split(':')
+ if device_mapping:
+ path_on_host = device_mapping[0]
+ if len(device_mapping) > 1:
+ path_in_container = device_mapping[1]
+ else:
+ path_in_container = path_on_host
+ if len(device_mapping) > 2:
+ permissions = device_mapping[2]
+ else:
+ permissions = 'rwm'
+ device_list.append({
+ 'PathOnHost': path_on_host,
+ 'PathInContainer': path_in_container,
+ 'CgroupPermissions': permissions
+ })
+ return device_list
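+
+# Illustrative example (the device paths are made-up):
+#   parse_devices(['/dev/sda:/dev/xvda:rwm']) ->
+#       [{'PathOnHost': '/dev/sda', 'PathInContainer': '/dev/xvda',
+#         'CgroupPermissions': 'rwm'}]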
+
+
+def kwargs_from_env(ssl_version=None, assert_hostname=None, environment=None):
+ if not environment:
+ environment = os.environ
+ host = environment.get('DOCKER_HOST')
+
+ # empty string for cert path is the same as unset.
+ cert_path = environment.get('DOCKER_CERT_PATH') or None
+
+ # empty string for tls verify counts as "false".
+    # Any other value counts as true; unset also counts as false.
+ tls_verify = environment.get('DOCKER_TLS_VERIFY')
+ if tls_verify == '':
+ tls_verify = False
+ else:
+ tls_verify = tls_verify is not None
+ enable_tls = cert_path or tls_verify
+
+ params = {}
+
+ if host:
+ params['base_url'] = host
+
+ if not enable_tls:
+ return params
+
+ if not cert_path:
+ cert_path = os.path.join(os.path.expanduser('~'), '.docker')
+
+ if not tls_verify and assert_hostname is None:
+ # assert_hostname is a subset of TLS verification,
+ # so if it's not set already then set it to false.
+ assert_hostname = False
+
+ params['tls'] = TLSConfig(
+ client_cert=(os.path.join(cert_path, 'cert.pem'),
+ os.path.join(cert_path, 'key.pem')),
+ ca_cert=os.path.join(cert_path, 'ca.pem'),
+ verify=tls_verify,
+ ssl_version=ssl_version,
+ assert_hostname=assert_hostname,
+ )
+
+ return params
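+
+# Illustrative example (assuming DOCKER_HOST=tcp://127.0.0.1:2376 and
+# DOCKER_TLS_VERIFY=1 in the environment, with certificates in ~/.docker):
+#   kwargs_from_env() -> {'base_url': 'tcp://127.0.0.1:2376', 'tls': TLSConfig(...)}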
+
+
+def convert_filters(filters):
+ result = {}
+ for k, v in iteritems(filters):
+ if isinstance(v, bool):
+ v = 'true' if v else 'false'
+ if not isinstance(v, list):
+ v = [v, ]
+ result[k] = [
+ str(item) if not isinstance(item, string_types) else item
+ for item in v
+ ]
+ return json.dumps(result)
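+
+# Illustrative example:
+#   convert_filters({'dangling': True}) -> '{"dangling": ["true"]}'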
+
+
+def datetime_to_timestamp(dt):
+ """Convert a UTC datetime to a Unix timestamp"""
+ delta = dt - datetime.utcfromtimestamp(0)
+ return delta.seconds + delta.days * 24 * 3600
+
+
+def parse_bytes(s):
+ if isinstance(s, integer_types + (float,)):
+ return s
+ if len(s) == 0:
+ return 0
+
+ if s[-2:-1].isalpha() and s[-1].isalpha():
+ if s[-1] == "b" or s[-1] == "B":
+ s = s[:-1]
+ units = BYTE_UNITS
+ suffix = s[-1].lower()
+
+    # Check if the variable is a string representation of an int
+    # without a units part. If so, assume the units are bytes.
+ if suffix.isdigit():
+ digits_part = s
+ suffix = 'b'
+ else:
+ digits_part = s[:-1]
+
+ if suffix in units.keys() or suffix.isdigit():
+ try:
+ digits = float(digits_part)
+ except ValueError:
+ raise errors.DockerException(
+ 'Failed converting the string value for memory ({0}) to'
+ ' an integer.'.format(digits_part)
+ )
+
+ # Reconvert to long for the final result
+ s = int(digits * units[suffix])
+ else:
+ raise errors.DockerException(
+ 'The specified value for memory ({0}) should specify the'
+                ' units. The suffix should be one of the `b`, `k`, `m`, `g`'
+ ' characters'.format(s)
+ )
+
+ return s
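+
+# Illustrative examples (using the suffix table from BYTE_UNITS):
+#   parse_bytes('512m') -> 536870912
+#   parse_bytes('1g')   -> 1073741824
+#   parse_bytes('42')   -> 42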
+
+
+def normalize_links(links):
+ if isinstance(links, dict):
+ links = iteritems(links)
+
+ return ['{0}:{1}'.format(k, v) if v else k for k, v in sorted(links)]
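+
+# Illustrative example (the link names are made-up):
+#   normalize_links({'db': 'database', 'cache': None}) -> ['cache', 'db:database']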
+
+
+def parse_env_file(env_file):
+ """
+ Reads a line-separated environment file.
+ The format of each line should be "key=value".
+ """
+ environment = {}
+
+ with open(env_file, 'r') as f:
+ for line in f:
+
+ if line[0] == '#':
+ continue
+
+ line = line.strip()
+ if not line:
+ continue
+
+ parse_line = line.split('=', 1)
+ if len(parse_line) == 2:
+ k, v = parse_line
+ environment[k] = v
+ else:
+ raise errors.DockerException(
+ 'Invalid line in environment file {0}:\n{1}'.format(
+ env_file, line))
+
+ return environment
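+
+# Illustrative example: a file containing the two lines
+#   # comment
+#   FOO=bar
+# yields parse_env_file(path) -> {'FOO': 'bar'}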
+
+
+def split_command(command):
+ if PY2 and not isinstance(command, binary_type):
+ command = command.encode('utf-8')
+ return shlex.split(command)
+
+
+def format_environment(environment):
+ def format_env(key, value):
+ if value is None:
+ return key
+ if isinstance(value, binary_type):
+ value = value.decode('utf-8')
+
+ return u'{key}={value}'.format(key=key, value=value)
+ return [format_env(*var) for var in iteritems(environment)]
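+
+# Illustrative example (ordering follows the dict's iteration order):
+#   format_environment({'FOO': 'bar', 'FLAG': None}) -> ['FOO=bar', 'FLAG']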
+
+
+def format_extra_hosts(extra_hosts, task=False):
+ # Use format dictated by Swarm API if container is part of a task
+ if task:
+ return [
+ '{0} {1}'.format(v, k) for k, v in sorted(iteritems(extra_hosts))
+ ]
+
+ return [
+ '{0}:{1}'.format(k, v) for k, v in sorted(iteritems(extra_hosts))
+ ]
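+
+# Illustrative examples (the host name and address are made-up):
+#   format_extra_hosts({'example.com': '10.0.0.1'})            -> ['example.com:10.0.0.1']
+#   format_extra_hosts({'example.com': '10.0.0.1'}, task=True) -> ['10.0.0.1 example.com']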
+
+
+def create_host_config(self, *args, **kwargs):
+ raise errors.DeprecatedMethod(
+ 'utils.create_host_config has been removed. Please use a '
+ 'docker.types.HostConfig object instead.'
+ )
diff --git a/ansible_collections/community/docker/plugins/module_utils/_scramble.py b/ansible_collections/community/docker/plugins/module_utils/_scramble.py
new file mode 100644
index 000000000..103257311
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/_scramble.py
@@ -0,0 +1,56 @@
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import random
+
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.six import PY2
+
+
+def generate_insecure_key():
+ '''Do NOT use this for cryptographic purposes!'''
+ while True:
+ # Generate a one-byte key. Right now the functions below do not use more
+ # than one byte, so this is sufficient.
+ if PY2:
+ key = chr(random.randint(0, 255))
+ else:
+ key = bytes([random.randint(0, 255)])
+ # Return anything that is not zero
+ if key != b'\x00':
+ return key
+
+
+def scramble(value, key):
+ '''Do NOT use this for cryptographic purposes!'''
+ if len(key) < 1:
+ raise ValueError('Key must be at least one byte')
+ value = to_bytes(value)
+ if PY2:
+ k = ord(key[0])
+ value = b''.join([chr(k ^ ord(b)) for b in value])
+ else:
+ k = key[0]
+ value = bytes([k ^ b for b in value])
+ return '=S=' + to_native(base64.b64encode(value))
+
+
+def unscramble(value, key):
+ '''Do NOT use this for cryptographic purposes!'''
+ if len(key) < 1:
+ raise ValueError('Key must be at least one byte')
+ if not value.startswith(u'=S='):
+ raise ValueError('Value does not start with indicator')
+ value = base64.b64decode(value[3:])
+ if PY2:
+ k = ord(key[0])
+ value = b''.join([chr(k ^ ord(b)) for b in value])
+ else:
+ k = key[0]
+ value = bytes([k ^ b for b in value])
+ return to_text(value)
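+
+
+# Illustrative round-trip (a fixed one-byte key is used here instead of
+# generate_insecure_key() so the example is deterministic):
+#   key = b'\x01'
+#   scrambled = scramble(u'secret', key)   # '=S=' + base64 of the XOR-ed bytes
+#   unscramble(scrambled, key)             # u'secret'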
diff --git a/ansible_collections/community/docker/plugins/module_utils/common.py b/ansible_collections/community/docker/plugins/module_utils/common.py
new file mode 100644
index 000000000..e6a06ed65
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/common.py
@@ -0,0 +1,693 @@
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import abc
+import os
+import platform
+import re
+import sys
+import traceback
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+HAS_DOCKER_PY = True
+HAS_DOCKER_PY_2 = False
+HAS_DOCKER_PY_3 = False
+HAS_DOCKER_ERROR = None
+HAS_DOCKER_TRACEBACK = None
+
+try:
+ from requests.exceptions import SSLError
+ from docker import __version__ as docker_version
+ from docker.errors import APIError, NotFound, TLSParameterError
+ from docker.tls import TLSConfig
+ from docker import auth
+
+ if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
+ HAS_DOCKER_PY_3 = True
+ from docker import APIClient as Client
+ elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ HAS_DOCKER_PY_2 = True
+ from docker import APIClient as Client
+ else:
+ from docker import Client
+
+except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ HAS_DOCKER_TRACEBACK = traceback.format_exc()
+ HAS_DOCKER_PY = False
+
+
+# The next two imports ``docker.models`` and ``docker.ssladapter`` are used
+# to ensure the user does not have both ``docker`` and ``docker-py`` modules
+# installed, as they utilize the same namespace and are incompatible.
+try:
+ # docker (Docker SDK for Python >= 2.0.0)
+ import docker.models # noqa: F401, pylint: disable=unused-import
+ HAS_DOCKER_MODELS = True
+except ImportError:
+ HAS_DOCKER_MODELS = False
+
+try:
+ # docker-py (Docker SDK for Python < 2.0.0)
+ import docker.ssladapter # noqa: F401, pylint: disable=unused-import
+ HAS_DOCKER_SSLADAPTER = True
+except ImportError:
+ HAS_DOCKER_SSLADAPTER = False
+
+
+try:
+ from requests.exceptions import RequestException # noqa: F401, pylint: disable=unused-import
+except ImportError:
+    # Either Docker SDK for Python no longer uses requests, or Docker SDK for Python
+    # itself is not installed, or its dependency requests is missing. In any case,
+    # define an exception class RequestException so that our code does not break.
+ class RequestException(Exception):
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.util import ( # noqa: F401, pylint: disable=unused-import
+ DEFAULT_DOCKER_HOST,
+ DEFAULT_TLS,
+ DEFAULT_TLS_VERIFY,
+ DEFAULT_TLS_HOSTNAME, # TODO: remove
+ DEFAULT_TIMEOUT_SECONDS,
+ DOCKER_COMMON_ARGS,
+ DOCKER_COMMON_ARGS_VARS, # TODO: remove
+ DOCKER_MUTUALLY_EXCLUSIVE,
+ DOCKER_REQUIRED_TOGETHER,
+ DEFAULT_DOCKER_REGISTRY, # TODO: remove
+ BYTE_SUFFIXES, # TODO: remove
+ is_image_name_id, # TODO: remove
+ is_valid_tag, # TODO: remove
+ sanitize_result,
+ DockerBaseClass, # TODO: remove
+ update_tls_hostname,
+ compare_dict_allow_more_present, # TODO: remove
+ compare_generic, # TODO: remove
+ DifferenceTracker, # TODO: remove
+ clean_dict_booleans_for_docker_api, # TODO: remove
+ convert_duration_to_nanosecond, # TODO: remove
+ parse_healthcheck, # TODO: remove
+ omit_none_from_dict, # TODO: remove
+)
+
+
+MIN_DOCKER_VERSION = "1.8.0"
+
+
+if not HAS_DOCKER_PY:
+ docker_version = None
+
+    # No Docker SDK for Python. Create a placeholder client to allow
+    # instantiation of AnsibleModule and proper error handling.
+ class Client(object): # noqa: F811
+ def __init__(self, **kwargs):
+ pass
+
+ class APIError(Exception): # noqa: F811
+ pass
+
+ class NotFound(Exception): # noqa: F811
+ pass
+
+
+def _get_tls_config(fail_function, **kwargs):
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ fail_function("TLS config error: %s" % exc)
+
+
+def is_using_tls(auth):
+ return auth['tls_verify'] or auth['tls']
+
+
+def get_connect_params(auth, fail_function):
+ if is_using_tls(auth):
+ auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+ result = dict(
+ base_url=auth['docker_host'],
+ version=auth['api_version'],
+ timeout=auth['timeout'],
+ )
+
+ if auth['tls_verify']:
+ # TLS with verification
+ tls_config = dict(
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function,
+ )
+ if auth['cert_path'] and auth['key_path']:
+ tls_config['client_cert'] = (auth['cert_path'], auth['key_path'])
+ if auth['cacert_path']:
+ tls_config['ca_cert'] = auth['cacert_path']
+ result['tls'] = _get_tls_config(**tls_config)
+ elif auth['tls']:
+ # TLS without verification
+ tls_config = dict(
+ verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function,
+ )
+ if auth['cert_path'] and auth['key_path']:
+ tls_config['client_cert'] = (auth['cert_path'], auth['key_path'])
+ result['tls'] = _get_tls_config(**tls_config)
+
+ if auth.get('use_ssh_client'):
+ if LooseVersion(docker_version) < LooseVersion('4.4.0'):
+ fail_function("use_ssh_client=True requires Docker SDK for Python 4.4.0 or newer")
+ result['use_ssh_client'] = True
+
+    # When neither TLS option is set, no 'tls' entry is added to the result.
+ return result
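+
+# Illustrative result of get_connect_params for a TLS-verified setup (the host
+# name and timeout are made-up values):
+#   {'base_url': 'https://docker-host:2376', 'version': 'auto',
+#    'timeout': 60, 'tls': TLSConfig(...)}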
+
+
+DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
+DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
+DOCKERPYUPGRADE_RECOMMEND_DOCKER = "Use `pip install --upgrade docker-py` to upgrade."
+
+
+class AnsibleDockerClientBase(Client):
+ def __init__(self, min_docker_version=None, min_docker_api_version=None):
+ if min_docker_version is None:
+ min_docker_version = MIN_DOCKER_VERSION
+ NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
+
+ self.docker_py_version = LooseVersion(docker_version)
+
+ if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
+ self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
+ "SDK for Python) installed together as they use the same namespace and cause a corrupt "
+ "installation. Please uninstall both packages, and re-install only the docker-py or docker "
+ "python module (for %s's Python %s). It is recommended to install the docker module. Please "
+ "note that simply uninstalling one of the modules can leave the other module in a broken "
+ "state." % (platform.node(), sys.executable))
+
+ if not HAS_DOCKER_PY:
+ msg = missing_required_lib("Docker SDK for Python: docker>=5.0.0 (Python >= 3.6) or "
+ "docker<5.0.0 (Python 2.7)")
+ msg = msg + ", for example via `pip install docker` (Python >= 3.6) or " \
+ + "`pip install docker==4.4.4` (Python 2.7). The error was: %s"
+ self.fail(msg % HAS_DOCKER_ERROR, exception=HAS_DOCKER_TRACEBACK)
+
+ if self.docker_py_version < LooseVersion(min_docker_version):
+ msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
+ if not NEEDS_DOCKER_PY2:
+ # The minimal required version is < 2.0 (and the current version as well).
+ # Advertise docker (instead of docker-py).
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif docker_version < LooseVersion('2.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
+
+ self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
+
+ try:
+ super(AnsibleDockerClientBase, self).__init__(**self._connect_params)
+ self.docker_api_version_str = self.api_version
+ except APIError as exc:
+ self.fail("Docker API error: %s" % exc)
+ except Exception as exc:
+ self.fail("Error connecting: %s" % exc)
+
+ self.docker_api_version = LooseVersion(self.docker_api_version_str)
+ min_docker_api_version = min_docker_api_version or '1.25'
+ if self.docker_api_version < LooseVersion(min_docker_api_version):
+ self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+ @abc.abstractmethod
+ def fail(self, msg, **kwargs):
+ pass
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ pass
+
+ @staticmethod
+ def _get_value(param_name, param_value, env_variable, default_value, type='str'):
+ if param_value is not None:
+ # take module parameter value
+ if type == 'bool':
+ if param_value in BOOLEANS_TRUE:
+ return True
+ if param_value in BOOLEANS_FALSE:
+ return False
+ return bool(param_value)
+ if type == 'int':
+ return int(param_value)
+ return param_value
+
+ if env_variable is not None:
+ env_value = os.environ.get(env_variable)
+ if env_value is not None:
+ # take the env variable value
+ if param_name == 'cert_path':
+ return os.path.join(env_value, 'cert.pem')
+ if param_name == 'cacert_path':
+ return os.path.join(env_value, 'ca.pem')
+ if param_name == 'key_path':
+ return os.path.join(env_value, 'key.pem')
+ if type == 'bool':
+ if env_value in BOOLEANS_TRUE:
+ return True
+ if env_value in BOOLEANS_FALSE:
+ return False
+ return bool(env_value)
+ if type == 'int':
+ return int(env_value)
+ return env_value
+
+ # take the default
+ return default_value
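+
+    # Illustrative precedence for _get_value (assuming DOCKER_TIMEOUT=90 is set
+    # in the environment):
+    #   _get_value('timeout', None, 'DOCKER_TIMEOUT', 60, type='int') -> 90
+    #   _get_value('timeout', 30, 'DOCKER_TIMEOUT', 60, type='int')   -> 30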
+
+ @abc.abstractmethod
+ def _get_params(self):
+ pass
+
+ @property
+ def auth_params(self):
+ # Get authentication credentials.
+        # Precedence: module parameters -> environment variables -> defaults.
+
+ self.log('Getting credentials')
+
+ client_params = self._get_params()
+
+ params = dict()
+ for key in DOCKER_COMMON_ARGS:
+ params[key] = client_params.get(key)
+
+ result = dict(
+ docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+ DEFAULT_DOCKER_HOST, type='str'),
+ tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+ 'DOCKER_TLS_HOSTNAME', None, type='str'),
+ api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+ 'auto', type='str'),
+ cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None, type='str'),
+ cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None, type='str'),
+ key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None, type='str'),
+ ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None, type='str'),
+ tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS, type='bool'),
+            tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+ DEFAULT_TLS_VERIFY, type='bool'),
+ timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+ DEFAULT_TIMEOUT_SECONDS, type='int'),
+ use_ssh_client=self._get_value('use_ssh_client', params['use_ssh_client'], None, False, type='bool'),
+ )
+
+ update_tls_hostname(result)
+
+ return result
+
+ def _handle_ssl_error(self, error):
+ match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+ if match:
+ self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
+ "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+ "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+ "setting the `tls` parameter to true."
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
+ self.fail("SSL Exception: %s" % (error))
+
+ def get_container_by_id(self, container_id):
+ try:
+ self.log("Inspecting container Id %s" % container_id)
+ result = self.inspect_container(container=container_id)
+ self.log("Completed container inspection")
+ return result
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting container: %s" % exc)
+
+ def get_container(self, name=None):
+ '''
+ Lookup a container and return the inspection results.
+ '''
+ if name is None:
+ return None
+
+ search_name = name
+ if not name.startswith('/'):
+ search_name = '/' + name
+
+ result = None
+ try:
+ for container in self.containers(all=True):
+ self.log("testing container: %s" % (container['Names']))
+ if isinstance(container['Names'], list) and search_name in container['Names']:
+ result = container
+ break
+ if container['Id'].startswith(name):
+ result = container
+ break
+ if container['Id'] == name:
+ result = container
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving container list: %s" % exc)
+
+ if result is None:
+ return None
+
+ return self.get_container_by_id(result['Id'])
+
+ def get_network(self, name=None, network_id=None):
+ '''
+ Lookup a network and return the inspection results.
+ '''
+ if name is None and network_id is None:
+ return None
+
+ result = None
+
+ if network_id is None:
+ try:
+ for network in self.networks():
+ self.log("testing network: %s" % (network['Name']))
+ if name == network['Name']:
+ result = network
+ break
+ if network['Id'].startswith(name):
+ result = network
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving network list: %s" % exc)
+
+ if result is not None:
+ network_id = result['Id']
+
+ if network_id is not None:
+ try:
+ self.log("Inspecting network Id %s" % network_id)
+ result = self.inspect_network(network_id)
+ self.log("Completed network inspection")
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting network: %s" % exc)
+
+ return result
+
+ def find_image(self, name, tag):
+ '''
+ Lookup an image (by name and tag) and return the inspection results.
+ '''
+ if not name:
+ return None
+
+ self.log("Find image %s:%s" % (name, tag))
+ images = self._image_lookup(name, tag)
+ if not images:
+            # With API <= 1.20, images pulled from Docker Hub may show up as 'docker.io/<name>'.
+ registry, repo_name = auth.resolve_repository_name(name)
+ if registry == 'docker.io':
+ # If docker.io is explicitly there in name, the image
+ # isn't found in some cases (#41509)
+ self.log("Check for docker.io image: %s" % repo_name)
+ images = self._image_lookup(repo_name, tag)
+ if not images and repo_name.startswith('library/'):
+ # Sometimes library/xxx images are not found
+ lookup = repo_name[len('library/'):]
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images:
+                    # Last case for some Docker versions: if docker.io was not part
+                    # of the name, the image may only be found with it prepended
+                    # (https://github.com/ansible/ansible/pull/15586)
+ lookup = "%s/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images and '/' not in repo_name:
+ # This seems to be happening with podman-docker
+ # (https://github.com/ansible-collections/community.docker/issues/291)
+ lookup = "%s/library/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+
+ if len(images) > 1:
+ self.fail("Registry returned more than one result for %s:%s" % (name, tag))
+
+ if len(images) == 1:
+ try:
+ inspection = self.inspect_image(images[0]['Id'])
+ except NotFound:
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
+ return inspection
+
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+
+ def find_image_by_id(self, image_id, accept_missing_image=False):
+ '''
+ Lookup an image (by ID) and return the inspection results.
+ '''
+ if not image_id:
+ return None
+
+ self.log("Find image %s (by ID)" % image_id)
+ try:
+ inspection = self.inspect_image(image_id)
+ except NotFound as exc:
+ if not accept_missing_image:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ self.log("Image %s not found." % image_id)
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ return inspection
+
+ def _image_lookup(self, name, tag):
+ '''
+ Including a tag in the name parameter sent to the Docker SDK for Python images method
+ does not work consistently. Instead, get the result set for name and manually check
+ if the tag exists.
+ '''
+ try:
+ response = self.images(name=name)
+ except Exception as exc:
+ self.fail("Error searching for image %s - %s" % (name, str(exc)))
+ images = response
+ if tag:
+ lookup = "%s:%s" % (name, tag)
+ lookup_digest = "%s@%s" % (name, tag)
+ images = []
+ for image in response:
+ tags = image.get('RepoTags')
+ digests = image.get('RepoDigests')
+ if (tags and lookup in tags) or (digests and lookup_digest in digests):
+ images = [image]
+ break
+ return images
+
+ def pull_image(self, name, tag="latest", platform=None):
+ '''
+ Pull an image
+ '''
+ kwargs = dict(
+ tag=tag,
+ stream=True,
+ decode=True,
+ )
+ if platform is not None:
+ kwargs['platform'] = platform
+ self.log("Pulling image %s:%s" % (name, tag))
+ old_tag = self.find_image(name, tag)
+ try:
+ for line in self.pull(name, **kwargs):
+ self.log(line, pretty_print=True)
+ if line.get('error'):
+ if line.get('errorDetail'):
+ error_detail = line.get('errorDetail')
+ self.fail("Error pulling %s - code: %s message: %s" % (name,
+ error_detail.get('code'),
+ error_detail.get('message')))
+ else:
+ self.fail("Error pulling %s - %s" % (name, line.get('error')))
+ except Exception as exc:
+ self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
+
+ new_tag = self.find_image(name, tag)
+
+ return new_tag, old_tag == new_tag
+
+ def inspect_distribution(self, image, **kwargs):
+ '''
+ Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
+ since prior versions did not support accessing private repositories.
+ '''
+ if self.docker_py_version < LooseVersion('4.0.0'):
+ registry = auth.resolve_repository_name(image)[0]
+ header = auth.get_config_header(self, registry)
+ if header:
+ return self._result(self._get(
+ self._url('/distribution/{0}/json', image),
+ headers={'X-Registry-Auth': header}
+ ), json=True)
+ return super(AnsibleDockerClientBase, self).inspect_distribution(image, **kwargs)
+
+
+class AnsibleDockerClient(AnsibleDockerClientBase):
+
+ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
+ required_together=None, required_if=None, required_one_of=None, required_by=None,
+ min_docker_version=None, min_docker_api_version=None, option_minimal_versions=None,
+ option_minimal_versions_ignore_params=None, fail_results=None):
+
+ # Modules can put information in here which will always be returned
+ # in case client.fail() is called.
+ self.fail_results = fail_results or {}
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(DOCKER_COMMON_ARGS)
+ if argument_spec:
+ merged_arg_spec.update(argument_spec)
+ self.arg_spec = merged_arg_spec
+
+ mutually_exclusive_params = []
+ mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
+ if mutually_exclusive:
+ mutually_exclusive_params += mutually_exclusive
+
+ required_together_params = []
+ required_together_params += DOCKER_REQUIRED_TOGETHER
+ if required_together:
+ required_together_params += required_together
+
+ self.module = AnsibleModule(
+ argument_spec=merged_arg_spec,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive_params,
+ required_together=required_together_params,
+ required_if=required_if,
+ required_one_of=required_one_of,
+ required_by=required_by or {},
+ )
+
+ self.debug = self.module.params.get('debug')
+ self.check_mode = self.module.check_mode
+
+ super(AnsibleDockerClient, self).__init__(
+ min_docker_version=min_docker_version,
+ min_docker_api_version=min_docker_api_version)
+
+ if option_minimal_versions is not None:
+ self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
+
+ def fail(self, msg, **kwargs):
+ self.fail_results.update(kwargs)
+ self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ self.module.deprecate(msg, version=version, date=date, collection_name=collection_name)
+
+ def _get_params(self):
+ return self.module.params
+
+ def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
+ self.option_minimal_versions = dict()
+ for option in self.module.argument_spec:
+ if ignore_params is not None:
+ if option in ignore_params:
+ continue
+ self.option_minimal_versions[option] = dict()
+ self.option_minimal_versions.update(option_minimal_versions)
+
+ for option, data in self.option_minimal_versions.items():
+ # Test whether option is supported, and store result
+ support_docker_py = True
+ support_docker_api = True
+ if 'docker_py_version' in data:
+ support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
+ if 'docker_api_version' in data:
+ support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
+ data['supported'] = support_docker_py and support_docker_api
+ # Fail if option is not supported but used
+ if not data['supported']:
+ # Test whether option is specified
+ if 'detect_usage' in data:
+ used = data['detect_usage'](self)
+ else:
+ used = self.module.params.get(option) is not None
+ if used and 'default' in self.module.argument_spec[option]:
+ used = self.module.params[option] != self.module.argument_spec[option]['default']
+ if used:
+ # If the option is used, compose error message.
+ if 'usage_msg' in data:
+ usg = data['usage_msg']
+ else:
+ usg = 'set %s option' % (option, )
+ if not support_docker_api:
+ msg = 'Docker API version is %s. Minimum version required is %s to %s.'
+ msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
+ elif not support_docker_py:
+ msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
+ if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif self.docker_py_version < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
+ else:
+ # should not happen
+ msg = 'Cannot %s with your configuration.' % (usg, )
+ self.fail(msg)
+
+ def report_warnings(self, result, warnings_key=None):
+ '''
+ Checks result of client operation for warnings, and if present, outputs them.
+
+ warnings_key should be a list of keys used to crawl the result dictionary.
+ For example, if warnings_key == ['a', 'b'], the function will consider
+ result['a']['b'] if these keys exist. If the result is a non-empty string, it
+ will be reported as a warning. If the result is a list, every entry will be
+ reported as a warning.
+
+ In most cases (if warnings are returned at all), warnings_key should be
+ ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
+ '''
+ if warnings_key is None:
+ warnings_key = ['Warnings']
+ for key in warnings_key:
+ if not isinstance(result, Mapping):
+ return
+ result = result.get(key)
+        # Check plain strings before Sequence: a str is itself a Sequence, so the
+        # string branch must come first to avoid warning once per character.
+        if isinstance(result, string_types) and result:
+            self.module.warn('Docker warning: {0}'.format(result))
+        elif isinstance(result, Sequence):
+            for warning in result:
+                self.module.warn('Docker warning: {0}'.format(warning))
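+
+
+# Illustrative use of report_warnings ('client' stands for an AnsibleDockerClient
+# instance; the dict mimics a daemon response):
+#   client.report_warnings({'Warnings': ['No swap limit support']})
+#   -> warns "Docker warning: No swap limit support"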
diff --git a/ansible_collections/community/docker/plugins/module_utils/common_api.py b/ansible_collections/community/docker/plugins/module_utils/common_api.py
new file mode 100644
index 000000000..7d46a153a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/common_api.py
@@ -0,0 +1,591 @@
+# Copyright 2016 Red Hat | Ansible
+# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import abc
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+try:
+ from requests.exceptions import RequestException, SSLError # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ # Define an exception class RequestException so that our code doesn't break.
+ class RequestException(Exception):
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils._api import auth
+from ansible_collections.community.docker.plugins.module_utils._api.api.client import APIClient as Client
+from ansible_collections.community.docker.plugins.module_utils._api.errors import (
+ APIError,
+ NotFound,
+ MissingRequirementException,
+ TLSParameterError,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.tls import TLSConfig
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
+ convert_filters,
+ parse_repository_tag,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import ( # noqa: F401, pylint: disable=unused-import
+ DEFAULT_DOCKER_HOST,
+ DEFAULT_TLS,
+ DEFAULT_TLS_VERIFY,
+ DEFAULT_TLS_HOSTNAME, # TODO: remove
+ DEFAULT_TIMEOUT_SECONDS,
+ DOCKER_COMMON_ARGS,
+ DOCKER_MUTUALLY_EXCLUSIVE,
+ DOCKER_REQUIRED_TOGETHER,
+ DEFAULT_DOCKER_REGISTRY, # TODO: remove
+ is_image_name_id, # TODO: remove
+ is_valid_tag, # TODO: remove
+ sanitize_result,
+ update_tls_hostname,
+)
+
+
+def _get_tls_config(fail_function, **kwargs):
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ fail_function("TLS config error: %s" % exc)
+
+
+def is_using_tls(auth_data):
+ return auth_data['tls_verify'] or auth_data['tls']
+
+
+def get_connect_params(auth_data, fail_function):
+ if is_using_tls(auth_data):
+ auth_data['docker_host'] = auth_data['docker_host'].replace('tcp://', 'https://')
+
+ result = dict(
+ base_url=auth_data['docker_host'],
+ version=auth_data['api_version'],
+ timeout=auth_data['timeout'],
+ )
+
+ if auth_data['tls_verify']:
+ # TLS with verification
+ tls_config = dict(
+ verify=True,
+ assert_hostname=auth_data['tls_hostname'],
+ ssl_version=auth_data['ssl_version'],
+ fail_function=fail_function,
+ )
+ if auth_data['cert_path'] and auth_data['key_path']:
+ tls_config['client_cert'] = (auth_data['cert_path'], auth_data['key_path'])
+ if auth_data['cacert_path']:
+ tls_config['ca_cert'] = auth_data['cacert_path']
+ result['tls'] = _get_tls_config(**tls_config)
+ elif auth_data['tls']:
+ # TLS without verification
+ tls_config = dict(
+ verify=False,
+ ssl_version=auth_data['ssl_version'],
+ fail_function=fail_function,
+ )
+ if auth_data['cert_path'] and auth_data['key_path']:
+ tls_config['client_cert'] = (auth_data['cert_path'], auth_data['key_path'])
+ result['tls'] = _get_tls_config(**tls_config)
+
+ if auth_data.get('use_ssh_client'):
+ result['use_ssh_client'] = True
+
+    # When neither TLS option is set, no 'tls' entry is added to the result.
+ return result
+
+
+class AnsibleDockerClientBase(Client):
+ def __init__(self, min_docker_api_version=None):
+ self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
+
+ try:
+ super(AnsibleDockerClientBase, self).__init__(**self._connect_params)
+ self.docker_api_version_str = self.api_version
+ except MissingRequirementException as exc:
+ self.fail(missing_required_lib(exc.requirement), exception=exc.import_exception)
+ except APIError as exc:
+ self.fail("Docker API error: %s" % exc)
+ except Exception as exc:
+ self.fail("Error connecting: %s" % exc)
+
+ self.docker_api_version = LooseVersion(self.docker_api_version_str)
+ min_docker_api_version = min_docker_api_version or '1.25'
+ if self.docker_api_version < LooseVersion(min_docker_api_version):
+ self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+ @abc.abstractmethod
+ def fail(self, msg, **kwargs):
+ pass
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ pass
+
+ @staticmethod
+ def _get_value(param_name, param_value, env_variable, default_value, type='str'):
+ if param_value is not None:
+ # take module parameter value
+ if type == 'bool':
+ if param_value in BOOLEANS_TRUE:
+ return True
+ if param_value in BOOLEANS_FALSE:
+ return False
+ return bool(param_value)
+ if type == 'int':
+ return int(param_value)
+ return param_value
+
+ if env_variable is not None:
+ env_value = os.environ.get(env_variable)
+ if env_value is not None:
+ # take the env variable value
+ if param_name == 'cert_path':
+ return os.path.join(env_value, 'cert.pem')
+ if param_name == 'cacert_path':
+ return os.path.join(env_value, 'ca.pem')
+ if param_name == 'key_path':
+ return os.path.join(env_value, 'key.pem')
+ if type == 'bool':
+ if env_value in BOOLEANS_TRUE:
+ return True
+ if env_value in BOOLEANS_FALSE:
+ return False
+ return bool(env_value)
+ if type == 'int':
+ return int(env_value)
+ return env_value
+
+ # take the default
+ return default_value
+
+ @abc.abstractmethod
+ def _get_params(self):
+ pass
+
+ @property
+ def auth_params(self):
+ # Get authentication credentials.
+        # Precedence: module parameters -> environment variables -> defaults.
+
+ self.log('Getting credentials')
+
+ client_params = self._get_params()
+
+ params = dict()
+ for key in DOCKER_COMMON_ARGS:
+ params[key] = client_params.get(key)
+
+ result = dict(
+ docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+ DEFAULT_DOCKER_HOST, type='str'),
+ tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+ 'DOCKER_TLS_HOSTNAME', None, type='str'),
+ api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+ 'auto', type='str'),
+ cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None, type='str'),
+ cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None, type='str'),
+ key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None, type='str'),
+ ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None, type='str'),
+ tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS, type='bool'),
+            tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+ DEFAULT_TLS_VERIFY, type='bool'),
+ timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+ DEFAULT_TIMEOUT_SECONDS, type='int'),
+ use_ssh_client=self._get_value('use_ssh_client', params['use_ssh_client'], None, False, type='bool'),
+ )
+
+ def depr(*args, **kwargs):
+ self.deprecate(*args, **kwargs)
+
+ update_tls_hostname(result, old_behavior=True, deprecate_function=depr, uses_tls=is_using_tls(result))
+
+ return result
+
+ def _handle_ssl_error(self, error):
+ match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+ if match:
+ self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
+ "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+ "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+ "setting the `tls` parameter to true."
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
+ self.fail("SSL Exception: %s" % (error))
+
+ def get_container_by_id(self, container_id):
+ try:
+ self.log("Inspecting container Id %s" % container_id)
+ result = self.get_json('/containers/{0}/json', container_id)
+ self.log("Completed container inspection")
+ return result
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting container: %s" % exc)
+
+ def get_container(self, name=None):
+ '''
+ Lookup a container and return the inspection results.
+ '''
+ if name is None:
+ return None
+
+ search_name = name
+ if not name.startswith('/'):
+ search_name = '/' + name
+
+ result = None
+ try:
+ params = {
+ 'limit': -1,
+ 'all': 1,
+ 'size': 0,
+ 'trunc_cmd': 0,
+ }
+ containers = self.get_json("/containers/json", params=params)
+ for container in containers:
+ self.log("testing container: %s" % (container['Names']))
+ if isinstance(container['Names'], list) and search_name in container['Names']:
+ result = container
+ break
+ if container['Id'].startswith(name):
+ result = container
+ break
+ if container['Id'] == name:
+ result = container
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving container list: %s" % exc)
+
+ if result is None:
+ return None
+
+ return self.get_container_by_id(result['Id'])
+
+ def get_network(self, name=None, network_id=None):
+ '''
+ Lookup a network and return the inspection results.
+ '''
+ if name is None and network_id is None:
+ return None
+
+ result = None
+
+ if network_id is None:
+ try:
+ networks = self.get_json("/networks")
+ for network in networks:
+ self.log("testing network: %s" % (network['Name']))
+ if name == network['Name']:
+ result = network
+ break
+ if network['Id'].startswith(name):
+ result = network
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving network list: %s" % exc)
+
+ if result is not None:
+ network_id = result['Id']
+
+ if network_id is not None:
+ try:
+ self.log("Inspecting network Id %s" % network_id)
+ result = self.get_json('/networks/{0}', network_id)
+ self.log("Completed network inspection")
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting network: %s" % exc)
+
+ return result
+
+ def _image_lookup(self, name, tag):
+ '''
+ Including a tag in the name parameter sent to the Docker SDK for Python images method
+ does not work consistently. Instead, get the result set for name and manually check
+ if the tag exists.
+ '''
+ try:
+ params = {
+ 'only_ids': 0,
+ 'all': 0,
+ }
+ if LooseVersion(self.api_version) < LooseVersion('1.25'):
+ # only use "filter" on API 1.24 and under, as it is deprecated
+ params['filter'] = name
+ else:
+ params['filters'] = convert_filters({'reference': name})
+ images = self.get_json("/images/json", params=params)
+ except Exception as exc:
+ self.fail("Error searching for image %s - %s" % (name, str(exc)))
+ if tag:
+ lookup = "%s:%s" % (name, tag)
+ lookup_digest = "%s@%s" % (name, tag)
+ response = images
+ images = []
+ for image in response:
+ tags = image.get('RepoTags')
+ digests = image.get('RepoDigests')
+ if (tags and lookup in tags) or (digests and lookup_digest in digests):
+ images = [image]
+ break
+ return images
+
+ def find_image(self, name, tag):
+ '''
+ Lookup an image (by name and tag) and return the inspection results.
+ '''
+ if not name:
+ return None
+
+ self.log("Find image %s:%s" % (name, tag))
+ images = self._image_lookup(name, tag)
+ if not images:
+            # With API <= 1.20, images pulled from Docker Hub may show up as 'docker.io/<name>'.
+ registry, repo_name = auth.resolve_repository_name(name)
+ if registry == 'docker.io':
+ # If docker.io is explicitly there in name, the image
+ # isn't found in some cases (#41509)
+ self.log("Check for docker.io image: %s" % repo_name)
+ images = self._image_lookup(repo_name, tag)
+ if not images and repo_name.startswith('library/'):
+ # Sometimes library/xxx images are not found
+ lookup = repo_name[len('library/'):]
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images:
+                    # Last case for some Docker versions: if docker.io was not part
+                    # of the name, the image may only be found with it prepended
+                    # (https://github.com/ansible/ansible/pull/15586)
+ lookup = "%s/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images and '/' not in repo_name:
+ # This seems to be happening with podman-docker
+ # (https://github.com/ansible-collections/community.docker/issues/291)
+ lookup = "%s/library/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+
+ if len(images) > 1:
+ self.fail("Registry returned more than one result for %s:%s" % (name, tag))
+
+ if len(images) == 1:
+ try:
+ return self.get_json('/images/{0}/json', images[0]['Id'])
+ except NotFound:
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
+
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+
+ def find_image_by_id(self, image_id, accept_missing_image=False):
+ '''
+ Lookup an image (by ID) and return the inspection results.
+ '''
+ if not image_id:
+ return None
+
+ self.log("Find image %s (by ID)" % image_id)
+ try:
+ return self.get_json('/images/{0}/json', image_id)
+ except NotFound as exc:
+ if not accept_missing_image:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ self.log("Image %s not found." % image_id)
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+
+ def pull_image(self, name, tag="latest", platform=None):
+ '''
+ Pull an image
+ '''
+ self.log("Pulling image %s:%s" % (name, tag))
+ old_tag = self.find_image(name, tag)
+ try:
+ repository, image_tag = parse_repository_tag(name)
+ registry, repo_name = auth.resolve_repository_name(repository)
+ params = {
+ 'tag': tag or image_tag or 'latest',
+ 'fromImage': repository,
+ }
+ if platform is not None:
+ params['platform'] = platform
+
+ headers = {}
+ header = auth.get_config_header(self, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+
+ response = self._post(
+ self._url('/images/create'), params=params, headers=headers,
+ stream=True, timeout=None
+ )
+ self._raise_for_status(response)
+ for line in self._stream_helper(response, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('error'):
+ if line.get('errorDetail'):
+ error_detail = line.get('errorDetail')
+ self.fail("Error pulling %s - code: %s message: %s" % (name,
+ error_detail.get('code'),
+ error_detail.get('message')))
+ else:
+ self.fail("Error pulling %s - %s" % (name, line.get('error')))
+ except Exception as exc:
+ self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
+
+ new_tag = self.find_image(name, tag)
+
+ return new_tag, old_tag == new_tag
+
+
+class AnsibleDockerClient(AnsibleDockerClientBase):
+
+ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
+ required_together=None, required_if=None, required_one_of=None, required_by=None,
+ min_docker_api_version=None, option_minimal_versions=None,
+ option_minimal_versions_ignore_params=None, fail_results=None):
+
+ # Modules can put information in here which will always be returned
+ # in case client.fail() is called.
+ self.fail_results = fail_results or {}
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(DOCKER_COMMON_ARGS)
+ if argument_spec:
+ merged_arg_spec.update(argument_spec)
+ self.arg_spec = merged_arg_spec
+
+ mutually_exclusive_params = []
+ mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
+ if mutually_exclusive:
+ mutually_exclusive_params += mutually_exclusive
+
+ required_together_params = []
+ required_together_params += DOCKER_REQUIRED_TOGETHER
+ if required_together:
+ required_together_params += required_together
+
+ self.module = AnsibleModule(
+ argument_spec=merged_arg_spec,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive_params,
+ required_together=required_together_params,
+ required_if=required_if,
+ required_one_of=required_one_of,
+ required_by=required_by or {},
+ )
+
+ self.debug = self.module.params.get('debug')
+ self.check_mode = self.module.check_mode
+
+ super(AnsibleDockerClient, self).__init__(min_docker_api_version=min_docker_api_version)
+
+ if option_minimal_versions is not None:
+ self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
+
+ def fail(self, msg, **kwargs):
+ self.fail_results.update(kwargs)
+ self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ self.module.deprecate(msg, version=version, date=date, collection_name=collection_name)
+
+ def _get_params(self):
+ return self.module.params
+
+ def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
+ self.option_minimal_versions = dict()
+ for option in self.module.argument_spec:
+ if ignore_params is not None:
+ if option in ignore_params:
+ continue
+ self.option_minimal_versions[option] = dict()
+ self.option_minimal_versions.update(option_minimal_versions)
+
+ for option, data in self.option_minimal_versions.items():
+ # Test whether option is supported, and store result
+ support_docker_api = True
+ if 'docker_api_version' in data:
+ support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
+ data['supported'] = support_docker_api
+ # Fail if option is not supported but used
+ if not data['supported']:
+ # Test whether option is specified
+ if 'detect_usage' in data:
+ used = data['detect_usage'](self)
+ else:
+ used = self.module.params.get(option) is not None
+ if used and 'default' in self.module.argument_spec[option]:
+ used = self.module.params[option] != self.module.argument_spec[option]['default']
+ if used:
+ # If the option is used, compose error message.
+ if 'usage_msg' in data:
+ usg = data['usage_msg']
+ else:
+ usg = 'set %s option' % (option, )
+ if not support_docker_api:
+ msg = 'Docker API version is %s. Minimum version required is %s to %s.'
+ msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
+ else:
+ # should not happen
+ msg = 'Cannot %s with your configuration.' % (usg, )
+ self.fail(msg)
+
+ def report_warnings(self, result, warnings_key=None):
+ '''
+ Checks result of client operation for warnings, and if present, outputs them.
+
+ warnings_key should be a list of keys used to crawl the result dictionary.
+ For example, if warnings_key == ['a', 'b'], the function will consider
+ result['a']['b'] if these keys exist. If the result is a non-empty string, it
+ will be reported as a warning. If the result is a list, every entry will be
+ reported as a warning.
+
+ In most cases (if warnings are returned at all), warnings_key should be
+ ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
+ '''
+ if warnings_key is None:
+ warnings_key = ['Warnings']
+ for key in warnings_key:
+ if not isinstance(result, Mapping):
+ return
+ result = result.get(key)
+        # Check plain strings before Sequence: a str is itself a Sequence, so the
+        # string branch must come first to avoid warning once per character.
+        if isinstance(result, string_types) and result:
+            self.module.warn('Docker warning: {0}'.format(result))
+        elif isinstance(result, Sequence):
+            for warning in result:
+                self.module.warn('Docker warning: {0}'.format(warning))
diff --git a/ansible_collections/community/docker/plugins/module_utils/copy.py b/ansible_collections/community/docker/plugins/module_utils/copy.py
new file mode 100644
index 000000000..6df84598a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/copy.py
@@ -0,0 +1,442 @@
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import base64
+import datetime
+import io
+import json
+import os
+import os.path
+import shutil
+import stat
+import tarfile
+
+from ansible.module_utils.common.text.converters import to_bytes, to_native, to_text
+from ansible.module_utils.six import raise_from
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, NotFound
+
+
+class DockerFileCopyError(Exception):
+ pass
+
+
+class DockerUnexpectedError(DockerFileCopyError):
+ pass
+
+
+class DockerFileNotFound(DockerFileCopyError):
+ pass
+
+
+def _put_archive(client, container, path, data):
+ # data can also be file object for streaming. This is because _put uses requests's put().
+ # See https://requests.readthedocs.io/en/latest/user/advanced/#streaming-uploads
+ url = client._url('/containers/{0}/archive', container)
+ res = client._put(url, params={'path': path}, data=data)
+ client._raise_for_status(res)
+ return res.status_code == 200
+
+
+def _symlink_tar_creator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
+ if not stat.S_ISLNK(file_stat.st_mode):
+ raise DockerUnexpectedError('stat information is not for a symlink')
+ bio = io.BytesIO()
+ with tarfile.open(fileobj=bio, mode='w|', dereference=False, encoding='utf-8') as tar:
+ # Note that without both name (bytes) and arcname (unicode), this either fails for
+        # Python 2.7, Python 3.5/3.6, or Python 3.7+. Only when both are passed (in
+        # this form) does it work with Python 2.7, 3.5, 3.6, and 3.7 up to 3.11.
+ tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
+ tarinfo.uid = user_id
+ tarinfo.uname = ''
+ if user_name:
+ tarinfo.uname = user_name
+ tarinfo.gid = group_id
+ tarinfo.gname = ''
+ tarinfo.mode &= 0o700
+ if mode is not None:
+ tarinfo.mode = mode
+ if not tarinfo.issym():
+ raise DockerUnexpectedError('stat information is not for a symlink')
+ tar.addfile(tarinfo)
+ return bio.getvalue()
+
+
+def _symlink_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
+ yield _symlink_tar_creator(b_in_path, file_stat, out_file, user_id, group_id, mode, user_name)
+
+
+def _regular_file_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=None, user_name=None):
+ if not stat.S_ISREG(file_stat.st_mode):
+ raise DockerUnexpectedError('stat information is not for a regular file')
+ tarinfo = tarfile.TarInfo()
+ tarinfo.name = os.path.splitdrive(to_text(out_file))[1].replace(os.sep, '/').lstrip('/')
+ tarinfo.mode = (file_stat.st_mode & 0o700) if mode is None else mode
+ tarinfo.uid = user_id
+ tarinfo.gid = group_id
+ tarinfo.size = file_stat.st_size
+ tarinfo.mtime = file_stat.st_mtime
+ tarinfo.type = tarfile.REGTYPE
+ tarinfo.linkname = ''
+ if user_name:
+ tarinfo.uname = user_name
+
+ tarinfo_buf = tarinfo.tobuf()
+ total_size = len(tarinfo_buf)
+ yield tarinfo_buf
+
+ size = tarinfo.size
+ total_size += size
+ with open(b_in_path, 'rb') as f:
+ while size > 0:
+ to_read = min(size, 65536)
+ buf = f.read(to_read)
+ if not buf:
+ break
+ size -= len(buf)
+ yield buf
+ if size:
+            # If for some reason the file shrank, fill up to the announced size
+            # with zeros. (If it grew, the extra data is simply not read.)
+ yield tarfile.NUL * size
+
+ remainder = tarinfo.size % tarfile.BLOCKSIZE
+ if remainder:
+ # We need to write a multiple of 512 bytes. Fill up with zeros.
+ yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
+ total_size += tarfile.BLOCKSIZE - remainder
+
+ # End with two zeroed blocks
+ yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
+ total_size += 2 * tarfile.BLOCKSIZE
+
+ remainder = total_size % tarfile.RECORDSIZE
+ if remainder > 0:
+ yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
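+
+# Worked framing example for the generator above: a 100-byte file yields a
+# 512-byte header block, the 100 payload bytes plus 412 NUL bytes to finish the
+# 512-byte block, two 512-byte zero blocks as end-of-archive, and then NUL
+# padding up to the next multiple of tarfile.RECORDSIZE (10240 bytes).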
+
+
+def _regular_content_tar_generator(content, out_file, user_id, group_id, mode, user_name=None):
+ tarinfo = tarfile.TarInfo()
+ tarinfo.name = os.path.splitdrive(to_text(out_file))[1].replace(os.sep, '/').lstrip('/')
+ tarinfo.mode = mode
+ tarinfo.uid = user_id
+ tarinfo.gid = group_id
+ tarinfo.size = len(content)
+ try:
+ tarinfo.mtime = int(datetime.datetime.now().timestamp())
+ except AttributeError:
+ # Python 2 (or more precisely: Python < 3.3) has no timestamp(). Use the following
+ # expression for Python 2:
+ tarinfo.mtime = int((datetime.datetime.utcnow() - datetime.datetime(1970, 1, 1)).total_seconds())
+ tarinfo.type = tarfile.REGTYPE
+ tarinfo.linkname = ''
+ if user_name:
+ tarinfo.uname = user_name
+
+ tarinfo_buf = tarinfo.tobuf()
+ total_size = len(tarinfo_buf)
+ yield tarinfo_buf
+
+ total_size += len(content)
+ yield content
+
+ remainder = tarinfo.size % tarfile.BLOCKSIZE
+ if remainder:
+ # We need to write a multiple of 512 bytes. Fill up with zeros.
+ yield tarfile.NUL * (tarfile.BLOCKSIZE - remainder)
+ total_size += tarfile.BLOCKSIZE - remainder
+
+ # End with two zeroed blocks
+ yield tarfile.NUL * (2 * tarfile.BLOCKSIZE)
+ total_size += 2 * tarfile.BLOCKSIZE
+
+ remainder = total_size % tarfile.RECORDSIZE
+ if remainder > 0:
+ yield tarfile.NUL * (tarfile.RECORDSIZE - remainder)
+
+
+def put_file(client, container, in_path, out_path, user_id, group_id, mode=None, user_name=None, follow_links=False):
+ """Transfer a file from local to Docker container."""
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise DockerFileNotFound(
+ "file or module does not exist: %s" % to_native(in_path))
+
+ b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
+
+ out_dir, out_file = os.path.split(out_path)
+
+ if follow_links:
+ file_stat = os.stat(b_in_path)
+ else:
+ file_stat = os.lstat(b_in_path)
+
+ if stat.S_ISREG(file_stat.st_mode):
+ stream = _regular_file_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=mode, user_name=user_name)
+ elif stat.S_ISLNK(file_stat.st_mode):
+ stream = _symlink_tar_generator(b_in_path, file_stat, out_file, user_id, group_id, mode=mode, user_name=user_name)
+ else:
+ raise DockerFileCopyError(
+ 'File{0} {1} is neither a regular file nor a symlink (stat mode {2}).'.format(
+ ' referenced by' if follow_links else '', in_path, oct(file_stat.st_mode)))
+
+ ok = _put_archive(client, container, out_dir, stream)
+ if not ok:
+ raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container))
+
+
+def put_file_content(client, container, content, out_path, user_id, group_id, mode, user_name=None):
+ """Transfer a file from local to Docker container."""
+ out_dir, out_file = os.path.split(out_path)
+
+ stream = _regular_content_tar_generator(content, out_file, user_id, group_id, mode, user_name=user_name)
+
+ ok = _put_archive(client, container, out_dir, stream)
+ if not ok:
+ raise DockerUnexpectedError('Unknown error while creating file "{0}" in container "{1}".'.format(out_path, container))
+
+
+def stat_file(client, container, in_path, follow_links=False, log=None):
+ """Fetch information on a file from a Docker container to local.
+
+ Return a tuple ``(path, stat_data, link_target)`` where:
+
+ :path: is the resolved path in case ``follow_links=True``;
+ :stat_data: is ``None`` if the file does not exist, or a dictionary with fields
+ ``name`` (string), ``size`` (integer), ``mode`` (integer, see https://pkg.go.dev/io/fs#FileMode),
+ ``mtime`` (string), and ``linkTarget`` (string);
+ :link_target: is ``None`` if the file is not a symlink or when ``follow_links=False``,
+ and a string with the symlink target otherwise.
+ """
+ considered_in_paths = set()
+
+ while True:
+ if in_path in considered_in_paths:
+ raise DockerFileCopyError('Found infinite symbolic link loop when trying to stat "{0}"'.format(in_path))
+ considered_in_paths.add(in_path)
+
+ if log:
+ log('FETCH: Stating "%s"' % in_path)
+
+ response = client._head(
+ client._url('/containers/{0}/archive', container),
+ params={'path': in_path},
+ )
+ if response.status_code == 404:
+ return in_path, None, None
+ client._raise_for_status(response)
+ header = response.headers.get('x-docker-container-path-stat')
+ try:
+ stat_data = json.loads(base64.b64decode(header))
+ except Exception as exc:
+ raise DockerUnexpectedError(
+ 'When retrieving information for {in_path} from {container}, obtained header {header!r} that cannot be loaded as JSON: {exc}'
+ .format(in_path=in_path, container=container, header=header, exc=exc)
+ )
+
+ # https://pkg.go.dev/io/fs#FileMode: bit 32 - 5 (i.e. 1 << 27) is ModeSymlink
+ if stat_data['mode'] & (1 << (32 - 5)) != 0:
+ link_target = stat_data['linkTarget']
+ if not follow_links:
+ return in_path, stat_data, link_target
+ in_path = os.path.join(os.path.split(in_path)[0], link_target)
+ continue
+
+ return in_path, stat_data, None
+
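+# Sketch of the header decoding used above, with a hypothetical payload:
+#
+# >>> import base64, json
+# >>> header = base64.b64encode(json.dumps({'name': 'f', 'mode': 420}).encode('utf-8'))
+# >>> json.loads(base64.b64decode(header))
+# {'name': 'f', 'mode': 420}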
+
+class _RawGeneratorFileobj(io.RawIOBase):
+ def __init__(self, stream):
+ self._stream = stream
+ self._buf = b''
+
+ def readable(self):
+ return True
+
+ def _readinto_from_buf(self, b, index, length):
+ cpy = min(length - index, len(self._buf))
+ if cpy:
+ b[index:index + cpy] = self._buf[:cpy]
+ self._buf = self._buf[cpy:]
+ index += cpy
+ return index
+
+ def readinto(self, b):
+ index = 0
+ length = len(b)
+
+ index = self._readinto_from_buf(b, index, length)
+ if index == length:
+ return index
+
+ try:
+ self._buf += next(self._stream)
+ except StopIteration:
+ return index
+
+ return self._readinto_from_buf(b, index, length)
+
+
+def _stream_generator_to_fileobj(stream):
+ '''Given a generator that generates chunks of bytes, create a readable buffered stream.'''
+ raw = _RawGeneratorFileobj(stream)
+ return io.BufferedReader(raw)
+
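+# Minimal usage sketch (illustrative): any generator of byte chunks becomes a
+# buffered, file-like reader:
+#
+# >>> reader = _stream_generator_to_fileobj(iter([b'abc', b'def']))
+# >>> reader.read(4)
+# b'abcd'
+# >>> reader.read()
+# b'ef'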
+
+def fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=False, log=None):
+ """Fetch a file (as a tar file entry) from a Docker container to local."""
+ considered_in_paths = set()
+
+ while True:
+ if in_path in considered_in_paths:
+ raise DockerFileCopyError('Found infinite symbolic link loop when trying to fetch "{0}"'.format(in_path))
+ considered_in_paths.add(in_path)
+
+ if log:
+ log('FETCH: Fetching "%s"' % in_path)
+ try:
+ stream = client.get_raw_stream(
+ '/containers/{0}/archive', container,
+ params={'path': in_path},
+ headers={'Accept-Encoding': 'identity'},
+ )
+ except NotFound:
+ return process_none(in_path)
+
+ with tarfile.open(fileobj=_stream_generator_to_fileobj(stream), mode='r|') as tar:
+ symlink_member = None
+ result = None
+ found = False
+ for member in tar:
+ if found:
+ raise DockerUnexpectedError('Received tarfile contains more than one file!')
+ found = True
+ if member.issym():
+ symlink_member = member
+ continue
+ if member.isfile():
+ result = process_regular(in_path, tar, member)
+ continue
+ result = process_other(in_path, member)
+ if symlink_member:
+ if not follow_links:
+ return process_symlink(in_path, symlink_member)
+ in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname)
+ if log:
+ log('FETCH: Following symbolic link to "%s"' % in_path)
+ continue
+ if found:
+ return result
+ raise DockerUnexpectedError('Received tarfile is empty!')
+
+
+def fetch_file(client, container, in_path, out_path, follow_links=False, log=None):
+ b_out_path = to_bytes(out_path, errors='surrogate_or_strict')
+
+ def process_none(in_path):
+ raise DockerFileNotFound(
+ 'File {in_path} does not exist in container {container}'
+ .format(in_path=in_path, container=container)
+ )
+
+ def process_regular(in_path, tar, member):
+ if not follow_links and os.path.exists(b_out_path):
+ os.unlink(b_out_path)
+
+ in_f = tar.extractfile(member) # in Python 2, this *cannot* be used in `with`...
+ with open(b_out_path, 'wb') as out_f:
+ shutil.copyfileobj(in_f, out_f)
+ return in_path
+
+ def process_symlink(in_path, member):
+ if os.path.exists(b_out_path):
+ os.unlink(b_out_path)
+
+ os.symlink(member.linkname, b_out_path)
+ return in_path
+
+ def process_other(in_path, member):
+ raise DockerFileCopyError('Remote file "%s" is not a regular file or a symbolic link' % in_path)
+
+ return fetch_file_ex(client, container, in_path, process_none, process_regular, process_symlink, process_other, follow_links=follow_links, log=log)
+
+
+def _execute_command(client, container, command, log=None, check_rc=False):
+ if log:
+ log('Executing {command} in {container}'.format(command=command, container=container))
+
+ data = {
+ 'Container': container,
+ 'User': '',
+ 'Privileged': False,
+ 'Tty': False,
+ 'AttachStdin': False,
+ 'AttachStdout': True,
+ 'AttachStderr': True,
+ 'Cmd': command,
+ }
+
+ if 'detachKeys' in client._general_configs:
+ data['detachKeys'] = client._general_configs['detachKeys']
+
+ try:
+ exec_data = client.post_json_to_json('/containers/{0}/exec', container, data=data)
+ except NotFound as e:
+ raise_from(
+ DockerFileCopyError('Could not find container "{container}"'.format(container=container)),
+ e,
+ )
+ except APIError as e:
+ if e.response is not None and e.response.status_code == 409:
+ raise_from(
+ DockerFileCopyError('Cannot execute command in paused container "{container}"'.format(container=container)),
+ e,
+ )
+ raise
+ exec_id = exec_data['Id']
+
+ data = {
+ 'Tty': False,
+ 'Detach': False
+ }
+ stdout, stderr = client.post_json_to_stream('/exec/{0}/start', exec_id, stream=False, demux=True, tty=False)
+
+ result = client.get_json('/exec/{0}/json', exec_id)
+
+ rc = result.get('ExitCode') or 0
+ stdout = stdout or b''
+ stderr = stderr or b''
+
+ if log:
+ log('Exit code {rc}, stdout {stdout!r}, stderr {stderr!r}'.format(rc=rc, stdout=stdout, stderr=stderr))
+
+ if check_rc and rc != 0:
+ raise DockerUnexpectedError(
+ 'Obtained unexpected exit code {rc} when running "{command}" in {container}.\nSTDOUT: {stdout}\nSTDERR: {stderr}'
+ .format(command=' '.join(command), container=container, rc=rc, stdout=stdout, stderr=stderr)
+ )
+
+ return rc, stdout, stderr
+
+
+def determine_user_group(client, container, log=None):
+ dummy, stdout, stderr = _execute_command(client, container, ['/bin/sh', '-c', 'id -u && id -g'], check_rc=True, log=log)
+
+ stdout_lines = stdout.splitlines()
+ if len(stdout_lines) != 2:
+ raise DockerUnexpectedError(
+ 'Expected two-line output to obtain user and group ID for container {container}, but got {lc} lines:\n{stdout}'
+ .format(container=container, lc=len(stdout_lines), stdout=stdout)
+ )
+
+ user_id, group_id = stdout_lines
+ try:
+ return int(user_id), int(group_id)
+ except ValueError:
+ raise DockerUnexpectedError(
+ 'Expected two-line output with numeric IDs to obtain user and group ID for container {container}, but got "{l1}" and "{l2}" instead'
+ .format(container=container, l1=user_id, l2=group_id)
+ )
diff --git a/ansible_collections/community/docker/plugins/module_utils/image_archive.py b/ansible_collections/community/docker/plugins/module_utils/image_archive.py
new file mode 100644
index 000000000..e174631e2
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/image_archive.py
@@ -0,0 +1,157 @@
+# Copyright 2022 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+import json
+import os
+import tarfile
+
+from ansible.module_utils.common.text.converters import to_native
+
+
+class ImageArchiveManifestSummary(object):
+ '''
+ Represents data extracted from a manifest.json found in the tar archive output of the
+ "docker image save some:tag > some.tar" command.
+ '''
+
+ def __init__(self, image_id, repo_tags):
+ '''
+ :param image_id: File name portion of Config entry, e.g. abcde12345 from abcde12345.json
+ :type image_id: str
+ :param repo_tags: Docker image names, e.g. ["hello-world:latest"]
+ :type repo_tags: list
+ '''
+
+ self.image_id = image_id
+ self.repo_tags = repo_tags
+
+
+class ImageArchiveInvalidException(Exception):
+ def __init__(self, message, cause):
+ '''
+ :param message: Exception message
+ :type message: str
+ :param cause: Inner exception that this exception wraps
+ :type cause: Exception | None
+ '''
+
+ super(ImageArchiveInvalidException, self).__init__(message)
+
+ # Python 2 doesn't support causes
+ self.cause = cause
+
+
+def api_image_id(archive_image_id):
+ '''
+ Accepts an image hash in the format stored in manifest.json, and returns an equivalent identifier
+ that represents the same image hash, but in the format presented by the Docker Engine API.
+
+ :param archive_image_id: plain image hash
+ :type archive_image_id: str
+
+ :returns: Prefixed hash used by the Docker Engine REST API
+ :rtype: str
+ '''
+
+ return 'sha256:%s' % archive_image_id
+
+
+def archived_image_manifest(archive_path):
+ '''
+ Attempts to get Image.Id and image name from metadata stored in the image
+ archive tar file.
+
+ The tar should contain a file "manifest.json" with an array with a single entry,
+ and the entry should have a Config field with the image ID in its file name, as
+ well as a RepoTags list, which typically has only one entry.
+
+ :raises:
+ ImageArchiveInvalidException: A file exists at archive_path, but an image ID could not be extracted from it.
+
+ :param archive_path: Tar file to read
+ :type archive_path: str
+
+ :return: None if there is no file at archive_path; otherwise a summary whose image ID does not have a sha256: prefix.
+ :rtype: ImageArchiveManifestSummary
+ '''
+
+ try:
+ # FileNotFoundError does not exist in Python 2
+ if not os.path.isfile(archive_path):
+ return None
+
+ tf = tarfile.open(archive_path, 'r')
+ try:
+ try:
+ ef = tf.extractfile('manifest.json')
+ try:
+ text = ef.read().decode('utf-8')
+ manifest = json.loads(text)
+ except Exception as exc:
+ raise ImageArchiveInvalidException(
+ "Failed to decode and deserialize manifest.json: %s" % to_native(exc),
+ exc
+ )
+ finally:
+ # In Python 2.6, this does not have __exit__
+ ef.close()
+
+ if len(manifest) != 1:
+ raise ImageArchiveInvalidException(
+ "Expected to have one entry in manifest.json but found %s" % len(manifest),
+ None
+ )
+
+ m0 = manifest[0]
+
+ try:
+ config_file = m0['Config']
+ except KeyError as exc:
+ raise ImageArchiveInvalidException(
+ "Failed to get Config entry from manifest.json: %s" % to_native(exc),
+ exc
+ )
+
+ # Extracts hash without 'sha256:' prefix
+ try:
+ # Strip off .json filename extension, leaving just the hash.
+ image_id = os.path.splitext(config_file)[0]
+ except Exception as exc:
+ raise ImageArchiveInvalidException(
+ "Failed to extract image id from config file name %s: %s" % (config_file, to_native(exc)),
+ exc
+ )
+
+ try:
+ repo_tags = m0['RepoTags']
+ except KeyError as exc:
+ raise ImageArchiveInvalidException(
+ "Failed to get RepoTags entry from manifest.json: %s" % to_native(exc),
+ exc
+ )
+
+ return ImageArchiveManifestSummary(
+ image_id=image_id,
+ repo_tags=repo_tags
+ )
+
+ except ImageArchiveInvalidException:
+ raise
+ except Exception as exc:
+ raise ImageArchiveInvalidException(
+ "Failed to extract manifest.json from tar file %s: %s" % (archive_path, to_native(exc)),
+ exc
+ )
+
+ finally:
+ # In Python 2.6, TarFile does not have __exit__
+ tf.close()
+
+ except ImageArchiveInvalidException:
+ raise
+ except Exception as exc:
+ raise ImageArchiveInvalidException("Failed to open tar file %s: %s" % (archive_path, to_native(exc)), exc)
diff --git a/ansible_collections/community/docker/plugins/module_utils/module_container/base.py b/ansible_collections/community/docker/plugins/module_utils/module_container/base.py
new file mode 100644
index 000000000..21c29226e
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/module_container/base.py
@@ -0,0 +1,1204 @@
+# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import abc
+import os
+import re
+import shlex
+
+from functools import partial
+
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.six import string_types
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ clean_dict_booleans_for_docker_api,
+ normalize_healthcheck,
+ omit_none_from_dict,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
+ parse_env_file,
+)
+
+
+_DEFAULT_IP_REPLACEMENT_STRING = '[[DEFAULT_IP:iewahhaeB4Sae6Aen8IeShairoh4zeph7xaekoh8Geingunaesaeweiy3ooleiwi]]'
+
+
+_MOUNT_OPTION_TYPES = dict(
+ volume_driver='volume',
+ volume_options='volume',
+ propagation='bind',
+ no_copy='volume',
+ labels='volume',
+ tmpfs_size='tmpfs',
+ tmpfs_mode='tmpfs',
+)
+
+
+def _get_ansible_type(type):
+ if type == 'set':
+ return 'list'
+ if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'):
+ raise Exception('Invalid type "%s"' % (type, ))
+ return type
+
+
+class Option(object):
+ def __init__(
+ self,
+ name,
+ type,
+ owner,
+ ansible_type=None,
+ elements=None,
+ ansible_elements=None,
+ ansible_suboptions=None,
+ ansible_aliases=None,
+ ansible_choices=None,
+ needs_no_suboptions=False,
+ default_comparison=None,
+ not_a_container_option=False,
+ not_an_ansible_option=False,
+ copy_comparison_from=None,
+ ):
+ self.name = name
+ self.type = type
+ self.ansible_type = ansible_type or _get_ansible_type(type)
+ needs_elements = self.type in ('list', 'set')
+ needs_ansible_elements = self.ansible_type in ('list', )
+ if elements is not None and not needs_elements:
+ raise Exception('elements only allowed for lists/sets')
+ if elements is None and needs_elements:
+ raise Exception('elements required for lists/sets')
+ if ansible_elements is not None and not needs_ansible_elements:
+ raise Exception('Ansible elements only allowed for Ansible lists')
+ if (elements is None and ansible_elements is None) and needs_ansible_elements:
+ raise Exception('Ansible elements required for Ansible lists')
+ self.elements = elements if needs_elements else None
+ self.ansible_elements = (ansible_elements or _get_ansible_type(elements)) if needs_ansible_elements else None
+ needs_suboptions = (self.ansible_type == 'list' and self.ansible_elements == 'dict') or (self.ansible_type == 'dict')
+ if ansible_suboptions is not None and not needs_suboptions:
+ raise Exception('suboptions only allowed for Ansible lists with dicts, or Ansible dicts')
+ if ansible_suboptions is None and needs_suboptions and not needs_no_suboptions and not not_an_ansible_option:
+ raise Exception('suboptions required for Ansible lists with dicts, or Ansible dicts')
+ self.ansible_suboptions = ansible_suboptions if needs_suboptions else None
+ self.ansible_aliases = ansible_aliases or []
+ self.ansible_choices = ansible_choices
+ comparison_type = self.type
+ if comparison_type == 'set' and self.elements == 'dict':
+ comparison_type = 'set(dict)'
+ elif comparison_type not in ('set', 'list', 'dict'):
+ comparison_type = 'value'
+ self.comparison_type = comparison_type
+ if default_comparison is not None:
+ self.comparison = default_comparison
+ elif comparison_type in ('list', 'value'):
+ self.comparison = 'strict'
+ else:
+ self.comparison = 'allow_more_present'
+ self.not_a_container_option = not_a_container_option
+ self.not_an_ansible_option = not_an_ansible_option
+ self.copy_comparison_from = copy_comparison_from
+
+
+class OptionGroup(object):
+ def __init__(
+ self,
+ preprocess=None,
+ ansible_mutually_exclusive=None,
+ ansible_required_together=None,
+ ansible_required_one_of=None,
+ ansible_required_if=None,
+ ansible_required_by=None,
+ ):
+ if preprocess is None:
+ def preprocess(module, values):
+ return values
+ self.preprocess = preprocess
+ self.options = []
+ self.all_options = []
+ self.engines = {}
+ self.ansible_mutually_exclusive = ansible_mutually_exclusive or []
+ self.ansible_required_together = ansible_required_together or []
+ self.ansible_required_one_of = ansible_required_one_of or []
+ self.ansible_required_if = ansible_required_if or []
+ self.ansible_required_by = ansible_required_by or {}
+ self.argument_spec = {}
+
+ def add_option(self, *args, **kwargs):
+ option = Option(*args, owner=self, **kwargs)
+ if not option.not_a_container_option:
+ self.options.append(option)
+ self.all_options.append(option)
+ if not option.not_an_ansible_option:
+ ansible_option = {
+ 'type': option.ansible_type,
+ }
+ if option.ansible_elements is not None:
+ ansible_option['elements'] = option.ansible_elements
+ if option.ansible_suboptions is not None:
+ ansible_option['options'] = option.ansible_suboptions
+ if option.ansible_aliases:
+ ansible_option['aliases'] = option.ansible_aliases
+ if option.ansible_choices is not None:
+ ansible_option['choices'] = option.ansible_choices
+ self.argument_spec[option.name] = ansible_option
+ return self
+
+ def supports_engine(self, engine_name):
+ return engine_name in self.engines
+
+ def get_engine(self, engine_name):
+ return self.engines[engine_name]
+
+ def add_engine(self, engine_name, engine):
+ self.engines[engine_name] = engine
+ return self
+
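+# The OPTION_* tables below are built with this fluent interface, for example
+# (illustrative only):
+#
+# OPTION_EXAMPLE = (
+#     OptionGroup()
+#     .add_option('example_flag', type='bool')
+# )
+#
+# Engine implementations are attached to each group separately via add_engine().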
+
+class Engine(object):
+ min_api_version = None # string or None
+ min_api_version_obj = None # LooseVersion object or None
+
+ @abc.abstractmethod
+ def get_value(self, module, container, api_version, options):
+ pass
+
+ @abc.abstractmethod
+ def set_value(self, module, data, api_version, options, values):
+ pass
+
+ @abc.abstractmethod
+ def get_expected_values(self, module, client, api_version, options, image, values):
+ pass
+
+ @abc.abstractmethod
+ def ignore_mismatching_result(self, module, client, api_version, option, image, container_value, expected_value):
+ pass
+
+ @abc.abstractmethod
+ def preprocess_value(self, module, client, api_version, options, values):
+ pass
+
+ @abc.abstractmethod
+ def update_value(self, module, data, api_version, options, values):
+ pass
+
+ @abc.abstractmethod
+ def can_set_value(self, api_version):
+ pass
+
+ @abc.abstractmethod
+ def can_update_value(self, api_version):
+ pass
+
+
+class EngineDriver(object):
+ name = None # string
+
+ @abc.abstractmethod
+ def setup(self, argument_spec, mutually_exclusive=None, required_together=None, required_one_of=None, required_if=None, required_by=None):
+ # Return (module, active_options, client)
+ pass
+
+ @abc.abstractmethod
+ def get_api_version(self, client):
+ pass
+
+ @abc.abstractmethod
+ def get_container_id(self, container):
+ pass
+
+ @abc.abstractmethod
+ def get_image_from_container(self, container):
+ pass
+
+ @abc.abstractmethod
+ def get_image_name_from_container(self, container):
+ pass
+
+ @abc.abstractmethod
+ def is_container_removing(self, container):
+ pass
+
+ @abc.abstractmethod
+ def is_container_running(self, container):
+ pass
+
+ @abc.abstractmethod
+ def is_container_paused(self, container):
+ pass
+
+ @abc.abstractmethod
+ def inspect_container_by_name(self, client, container_name):
+ pass
+
+ @abc.abstractmethod
+ def inspect_container_by_id(self, client, container_id):
+ pass
+
+ @abc.abstractmethod
+ def inspect_image_by_id(self, client, image_id):
+ pass
+
+ @abc.abstractmethod
+ def inspect_image_by_name(self, client, repository, tag):
+ pass
+
+ @abc.abstractmethod
+ def pull_image(self, client, repository, tag, platform=None):
+ pass
+
+ @abc.abstractmethod
+ def pause_container(self, client, container_id):
+ pass
+
+ @abc.abstractmethod
+ def unpause_container(self, client, container_id):
+ pass
+
+ @abc.abstractmethod
+ def disconnect_container_from_network(self, client, container_id, network_id):
+ pass
+
+ @abc.abstractmethod
+ def connect_container_to_network(self, client, container_id, network_id, parameters=None):
+ pass
+
+ @abc.abstractmethod
+ def create_container(self, client, container_name, create_parameters):
+ pass
+
+ @abc.abstractmethod
+ def start_container(self, client, container_id):
+ pass
+
+ @abc.abstractmethod
+ def wait_for_container(self, client, container_id, timeout=None):
+ pass
+
+ @abc.abstractmethod
+ def get_container_output(self, client, container_id):
+ pass
+
+ @abc.abstractmethod
+ def update_container(self, client, container_id, update_parameters):
+ pass
+
+ @abc.abstractmethod
+ def restart_container(self, client, container_id, timeout=None):
+ pass
+
+ @abc.abstractmethod
+ def kill_container(self, client, container_id, kill_signal=None):
+ pass
+
+ @abc.abstractmethod
+ def stop_container(self, client, container_id, timeout=None):
+ pass
+
+ @abc.abstractmethod
+ def remove_container(self, client, container_id, remove_volumes=False, link=False, force=False):
+ pass
+
+ @abc.abstractmethod
+ def run(self, runner, client):
+ pass
+
+
+def _is_volume_permissions(mode):
+ for part in mode.split(','):
+ if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'):
+ return False
+ return True
+
+
+def _parse_port_range(range_or_port, module):
+ '''
+ Parses a string containing either a single port or a range of ports.
+
+ Returns a list of integers for each port in the list.
+ '''
+ if '-' in range_or_port:
+ try:
+ start, end = [int(port) for port in range_or_port.split('-')]
+ except Exception:
+ module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port))
+ if end < start:
+ module.fail_json(msg='Invalid port range: "{0}"'.format(range_or_port))
+ return list(range(start, end + 1))
+ else:
+ try:
+ return [int(range_or_port)]
+ except Exception:
+ module.fail_json(msg='Invalid port: "{0}"'.format(range_or_port))
+
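+# For instance (illustrative): _parse_port_range('8000-8002', module) returns
+# [8000, 8001, 8002], and _parse_port_range('80', module) returns [80].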
+
+def _split_colon_ipv6(text, module):
+ '''
+ Split string by ':', while keeping IPv6 addresses in square brackets in one component.
+ '''
+ if '[' not in text:
+ return text.split(':')
+ start = 0
+ result = []
+ while start < len(text):
+ i = text.find('[', start)
+ if i < 0:
+ result.extend(text[start:].split(':'))
+ break
+ j = text.find(']', i)
+ if j < 0:
+ module.fail_json(msg='Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1))
+ result.extend(text[start:i].split(':'))
+ k = text.find(':', j)
+ if k < 0:
+ result[-1] += text[i:]
+ start = len(text)
+ else:
+ result[-1] += text[i:k]
+ if k == len(text):
+ result.append('')
+ break
+ start = k + 1
+ return result
+
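+# For instance (illustrative):
+#
+# _split_colon_ipv6('[::1]:8080:80', module) -> ['[::1]', '8080', '80']
+# _split_colon_ipv6('127.0.0.1:8080:80', module) -> ['127.0.0.1', '8080', '80']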
+
+def _preprocess_command(module, values):
+ if 'command' not in values:
+ return values
+ value = values['command']
+ if module.params['command_handling'] == 'correct':
+ if value is not None:
+ if not isinstance(value, list):
+ # convert from str to list
+ value = shlex.split(to_text(value, errors='surrogate_or_strict'))
+ value = [to_text(x, errors='surrogate_or_strict') for x in value]
+ elif value:
+ # convert from list to str and re-split (compatibility behavior)
+ if isinstance(value, list):
+ value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value]))
+ value = [to_text(x, errors='surrogate_or_strict') for x in value]
+ else:
+ value = shlex.split(to_text(value, errors='surrogate_or_strict'))
+ value = [to_text(x, errors='surrogate_or_strict') for x in value]
+ else:
+ return {}
+ return {
+ 'command': value,
+ }
+
+
+def _preprocess_entrypoint(module, values):
+ if 'entrypoint' not in values:
+ return values
+ value = values['entrypoint']
+ if module.params['command_handling'] == 'correct':
+ if value is not None:
+ value = [to_text(x, errors='surrogate_or_strict') for x in value]
+ elif value:
+ # convert from list to str and re-split (compatibility behavior)
+ value = shlex.split(' '.join([to_text(x, errors='surrogate_or_strict') for x in value]))
+ value = [to_text(x, errors='surrogate_or_strict') for x in value]
+ else:
+ return {}
+ return {
+ 'entrypoint': value,
+ }
+
+
+def _preprocess_env(module, values):
+ if not values:
+ return {}
+ final_env = {}
+ if 'env_file' in values:
+ parsed_env_file = parse_env_file(values['env_file'])
+ for name, value in parsed_env_file.items():
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ if 'env' in values:
+ for name, value in values['env'].items():
+ if not isinstance(value, string_types):
+ module.fail_json(msg='Non-string value found for env option. Ambiguous env options must be '
+ 'wrapped in quotes to avoid them being interpreted. Key: %s' % (name, ))
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ formatted_env = []
+ for key, value in final_env.items():
+ formatted_env.append('%s=%s' % (key, value))
+ return {
+ 'env': formatted_env,
+ }
+
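+# Illustrative: values = {'env': {'FOO': 'bar', 'DEBUG': '1'}} is turned into
+# {'env': ['FOO=bar', 'DEBUG=1']} (entry order follows dict iteration order).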
+
+def _preprocess_healthcheck(module, values):
+ if not values:
+ return {}
+ return {
+ 'healthcheck': normalize_healthcheck(values['healthcheck'], normalize_test=False),
+ }
+
+
+def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None):
+ if name not in values:
+ return values
+ try:
+ value = values[name]
+ if unlimited_value is not None and value in ('unlimited', str(unlimited_value)):
+ value = unlimited_value
+ else:
+ value = human_to_bytes(value)
+ values[name] = value
+ return values
+ except ValueError as exc:
+ module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc)))
+
+
+def _preprocess_mac_address(module, values):
+ if 'mac_address' not in values:
+ return values
+ return {
+ 'mac_address': values['mac_address'].replace('-', ':'),
+ }
+
+
+def _preprocess_networks(module, values):
+ if module.params['networks_cli_compatible'] is True and values.get('networks') and 'network_mode' not in values:
+ # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode
+ # (assuming no explicit value is specified for network_mode)
+ values['network_mode'] = values['networks'][0]['name']
+
+ if 'networks' in values:
+ for network in values['networks']:
+ if network['links']:
+ parsed_links = []
+ for link in network['links']:
+ parsed_link = link.split(':', 1)
+ if len(parsed_link) == 1:
+ parsed_link = (link, link)
+ parsed_links.append(tuple(parsed_link))
+ network['links'] = parsed_links
+
+ return values
+
+
+def _preprocess_sysctls(module, values):
+ if 'sysctls' in values:
+ for key, value in values['sysctls'].items():
+ values['sysctls'][key] = to_text(value, errors='surrogate_or_strict')
+ return values
+
+
+def _preprocess_tmpfs(module, values):
+ if 'tmpfs' not in values:
+ return values
+ result = {}
+ for tmpfs_spec in values['tmpfs']:
+ split_spec = tmpfs_spec.split(":", 1)
+ if len(split_spec) > 1:
+ result[split_spec[0]] = split_spec[1]
+ else:
+ result[split_spec[0]] = ""
+ return {
+ 'tmpfs': result
+ }
+
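+# Illustrative: values = {'tmpfs': ['/tmp:rw,size=64m', '/run']} becomes
+# {'tmpfs': {'/tmp': 'rw,size=64m', '/run': ''}}.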
+
+def _preprocess_ulimits(module, values):
+ if 'ulimits' not in values:
+ return values
+ result = []
+ for limit in values['ulimits']:
+ limits = dict()
+ pieces = limit.split(':')
+ if len(pieces) >= 2:
+ limits['Name'] = pieces[0]
+ limits['Soft'] = int(pieces[1])
+ limits['Hard'] = int(pieces[1])
+ if len(pieces) == 3:
+ limits['Hard'] = int(pieces[2])
+ result.append(limits)
+ return {
+ 'ulimits': result,
+ }
+
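+# Illustrative: values = {'ulimits': ['nofile:1024:2048', 'nproc:512']} becomes
+# {'ulimits': [{'Name': 'nofile', 'Soft': 1024, 'Hard': 2048},
+#              {'Name': 'nproc', 'Soft': 512, 'Hard': 512}]}.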
+
+def _preprocess_mounts(module, values):
+ last = dict()
+
+ def check_collision(t, name):
+ if t in last:
+ if name == last[t]:
+ module.fail_json(msg='The mount point "{0}" appears twice in the {1} option'.format(t, name))
+ else:
+ module.fail_json(msg='The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
+ last[t] = name
+
+ if 'mounts' in values:
+ mounts = []
+ for mount in values['mounts']:
+ target = mount['target']
+ mount_type = mount['type']
+
+ check_collision(target, 'mounts')
+
+ mount_dict = dict(mount)
+
+ # Sanity checks
+ if mount['source'] is None and mount_type not in ('tmpfs', 'volume'):
+ module.fail_json(msg='source must be specified for mount "{0}" of type "{1}"'.format(target, mount_type))
+ for option, req_mount_type in _MOUNT_OPTION_TYPES.items():
+ if mount[option] is not None and mount_type != req_mount_type:
+ module.fail_json(
+ msg='{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, mount_type, req_mount_type)
+ )
+
+ # Streamline options
+ volume_options = mount_dict.pop('volume_options')
+ if mount_dict['volume_driver'] and volume_options:
+ mount_dict['volume_options'] = clean_dict_booleans_for_docker_api(volume_options)
+ if mount_dict['labels']:
+ mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels'])
+ if mount_dict['tmpfs_size'] is not None:
+ try:
+ mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size'])
+ except ValueError as exc:
+ module.fail_json(msg='Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, to_native(exc)))
+ if mount_dict['tmpfs_mode'] is not None:
+ try:
+ mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8)
+ except Exception as dummy:
+ module.fail_json(msg='tmpfs_mode of mount "{0}" is not an octal string!'.format(target))
+
+ # Add result to list
+ mounts.append(omit_none_from_dict(mount_dict))
+ values['mounts'] = mounts
+ if 'volumes' in values:
+ new_vols = []
+ for vol in values['volumes']:
+ parts = vol.split(':')
+ if ':' in vol:
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not _is_volume_permissions(mode):
+ module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode))
+ if re.match(r'[.~]', host):
+ host = os.path.abspath(os.path.expanduser(host))
+ check_collision(container, 'volumes')
+ new_vols.append("%s:%s:%s" % (host, container, mode))
+ continue
+ elif len(parts) == 2:
+ if not _is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]):
+ host = os.path.abspath(os.path.expanduser(parts[0]))
+ check_collision(parts[1], 'volumes')
+ new_vols.append("%s:%s:rw" % (host, parts[1]))
+ continue
+ check_collision(parts[min(1, len(parts) - 1)], 'volumes')
+ new_vols.append(vol)
+ values['volumes'] = new_vols
+ new_binds = []
+ for vol in new_vols:
+ host = None
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not _is_volume_permissions(mode):
+ module.fail_json(msg='Found invalid volumes mode: {0}'.format(mode))
+ elif len(parts) == 2:
+ if not _is_volume_permissions(parts[1]):
+ host, container, mode = (parts + ['rw'])
+ if host is not None:
+ new_binds.append('%s:%s:%s' % (host, container, mode))
+ values['volume_binds'] = new_binds
+ return values
+
+
+def _preprocess_log(module, values):
+ result = {}
+ if 'log_driver' not in values:
+ return result
+ result['log_driver'] = values['log_driver']
+ if 'log_options' in values:
+ options = {}
+ for k, v in values['log_options'].items():
+ if not isinstance(v, string_types):
+ module.warn(
+ "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
+ "If this is not correct, or you want to avoid such warnings, please quote the value." % (
+ k, to_text(v, errors='surrogate_or_strict'))
+ )
+ v = to_text(v, errors='surrogate_or_strict')
+ options[k] = v
+ result['log_options'] = options
+ return result
+
+
+def _preprocess_ports(module, values):
+ if 'published_ports' in values:
+ if 'all' in values['published_ports']:
+ module.fail_json(
+ msg='Specifying "all" in published_ports is no longer allowed. Set publish_all_ports to "true" instead '
+ 'to randomly assign port mappings for those not specified by published_ports.')
+
+ binds = {}
+ for port in values['published_ports']:
+ parts = _split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), module)
+ container_port = parts[-1]
+ protocol = ''
+ if '/' in container_port:
+ container_port, protocol = parts[-1].split('/')
+ container_ports = _parse_port_range(container_port, module)
+
+ p_len = len(parts)
+ if p_len == 1:
+ port_binds = len(container_ports) * [(_DEFAULT_IP_REPLACEMENT_STRING, )]
+ elif p_len == 2:
+ if len(container_ports) == 1:
+ port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, parts[0])]
+ else:
+ port_binds = [(_DEFAULT_IP_REPLACEMENT_STRING, port) for port in _parse_port_range(parts[0], module)]
+ elif p_len == 3:
+ # We only allow IPv4 and IPv6 addresses for the bind address
+ ipaddr = parts[0]
+ if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr):
+ module.fail_json(
+ msg='Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
+ 'Use the dig lookup to resolve hostnames. (Found hostname: {0})'.format(ipaddr)
+ )
+ if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
+ ipaddr = ipaddr[1:-1]
+ if parts[1]:
+ if len(container_ports) == 1:
+ port_binds = [(ipaddr, parts[1])]
+ else:
+ port_binds = [(ipaddr, port) for port in _parse_port_range(parts[1], module)]
+ else:
+ port_binds = len(container_ports) * [(ipaddr,)]
+ else:
+ module.fail_json(
+ msg='Invalid port description "%s" - expected 1 to 3 colon-separated parts, but got %d. '
+ 'Maybe you forgot to use square brackets ([...]) around an IPv6 address?' % (port, p_len)
+ )
+
+ for bind, container_port in zip(port_binds, container_ports):
+ idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port
+ if idx in binds:
+ old_bind = binds[idx]
+ if isinstance(old_bind, list):
+ old_bind.append(bind)
+ else:
+ binds[idx] = [old_bind, bind]
+ else:
+ binds[idx] = bind
+ values['published_ports'] = binds
+
+ exposed = []
+ if 'exposed_ports' in values:
+ for port in values['exposed_ports']:
+ port = to_text(port, errors='surrogate_or_strict').strip()
+ protocol = 'tcp'
+ match = re.search(r'(/.+$)', port)
+ if match:
+ protocol = match.group(1).replace('/', '')
+ port = re.sub(r'/.+$', '', port)
+ exposed.append((port, protocol))
+ if 'published_ports' in values:
+ # Any published port should also be exposed
+ for publish_port in values['published_ports']:
+ match = False
+ if isinstance(publish_port, string_types) and '/' in publish_port:
+ port, protocol = publish_port.split('/')
+ port = int(port)
+ else:
+ protocol = 'tcp'
+ port = int(publish_port)
+ for exposed_port in exposed:
+ if exposed_port[1] != protocol:
+ continue
+ if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
+ start_port, end_port = exposed_port[0].split('-')
+ if int(start_port) <= port <= int(end_port):
+ match = True
+ elif exposed_port[0] == port:
+ match = True
+ if not match:
+ exposed.append((port, protocol))
+ values['ports'] = exposed
+ return values
+
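+# Illustrative: with published_ports=['127.0.0.1:8080:80/tcp'], the result has
+# values['published_ports'] == {'80/tcp': ('127.0.0.1', '8080')} and
+# values['ports'] == [(80, 'tcp')] (published ports are implicitly exposed).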
+
+OPTION_AUTO_REMOVE = (
+ OptionGroup()
+ .add_option('auto_remove', type='bool')
+)
+
+OPTION_BLKIO_WEIGHT = (
+ OptionGroup()
+ .add_option('blkio_weight', type='int')
+)
+
+OPTION_CAPABILITIES = (
+ OptionGroup()
+ .add_option('capabilities', type='set', elements='str')
+)
+
+OPTION_CAP_DROP = (
+ OptionGroup()
+ .add_option('cap_drop', type='set', elements='str')
+)
+
+OPTION_CGROUP_NS_MODE = (
+ OptionGroup()
+ .add_option('cgroupns_mode', type='str', ansible_choices=['private', 'host'])
+)
+
+OPTION_CGROUP_PARENT = (
+ OptionGroup()
+ .add_option('cgroup_parent', type='str')
+)
+
+OPTION_COMMAND = (
+ OptionGroup(preprocess=_preprocess_command)
+ .add_option('command', type='list', elements='str', ansible_type='raw')
+)
+
+OPTION_CPU_PERIOD = (
+ OptionGroup()
+ .add_option('cpu_period', type='int')
+)
+
+OPTION_CPU_QUOTA = (
+ OptionGroup()
+ .add_option('cpu_quota', type='int')
+)
+
+OPTION_CPUSET_CPUS = (
+ OptionGroup()
+ .add_option('cpuset_cpus', type='str')
+)
+
+OPTION_CPUSET_MEMS = (
+ OptionGroup()
+ .add_option('cpuset_mems', type='str')
+)
+
+OPTION_CPU_SHARES = (
+ OptionGroup()
+ .add_option('cpu_shares', type='int')
+)
+
+OPTION_ENTRYPOINT = (
+ OptionGroup(preprocess=_preprocess_entrypoint)
+ .add_option('entrypoint', type='list', elements='str')
+)
+
+OPTION_CPUS = (
+ OptionGroup()
+ .add_option('cpus', type='int', ansible_type='float')
+)
+
+OPTION_DETACH_INTERACTIVE = (
+ OptionGroup()
+ .add_option('detach', type='bool')
+ .add_option('interactive', type='bool')
+)
+
+OPTION_DEVICES = (
+ OptionGroup()
+ .add_option('devices', type='set', elements='dict', ansible_elements='str')
+)
+
+OPTION_DEVICE_READ_BPS = (
+ OptionGroup()
+ .add_option('device_read_bps', type='set', elements='dict', ansible_suboptions=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ ))
+)
+
+OPTION_DEVICE_WRITE_BPS = (
+ OptionGroup()
+ .add_option('device_write_bps', type='set', elements='dict', ansible_suboptions=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ ))
+)
+
+OPTION_DEVICE_READ_IOPS = (
+ OptionGroup()
+ .add_option('device_read_iops', type='set', elements='dict', ansible_suboptions=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ ))
+)
+
+OPTION_DEVICE_WRITE_IOPS = (
+ OptionGroup()
+ .add_option('device_write_iops', type='set', elements='dict', ansible_suboptions=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ ))
+)
+
+OPTION_DEVICE_REQUESTS = (
+ OptionGroup()
+ .add_option('device_requests', type='set', elements='dict', ansible_suboptions=dict(
+ capabilities=dict(type='list', elements='list'),
+ count=dict(type='int'),
+ device_ids=dict(type='list', elements='str'),
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ ))
+)
+
+OPTION_DNS_SERVERS = (
+ OptionGroup()
+ .add_option('dns_servers', type='list', elements='str')
+)
+
+OPTION_DNS_OPTS = (
+ OptionGroup()
+ .add_option('dns_opts', type='set', elements='str')
+)
+
+OPTION_DNS_SEARCH_DOMAINS = (
+ OptionGroup()
+ .add_option('dns_search_domains', type='list', elements='str')
+)
+
+OPTION_DOMAINNAME = (
+ OptionGroup()
+ .add_option('domainname', type='str')
+)
+
+OPTION_ENVIRONMENT = (
+ OptionGroup(preprocess=_preprocess_env)
+ .add_option('env', type='set', ansible_type='dict', elements='str', needs_no_suboptions=True)
+ .add_option('env_file', type='set', ansible_type='path', elements='str', not_a_container_option=True)
+)
+
+OPTION_ETC_HOSTS = (
+ OptionGroup()
+ .add_option('etc_hosts', type='set', ansible_type='dict', elements='str', needs_no_suboptions=True)
+)
+
+OPTION_GROUPS = (
+ OptionGroup()
+ .add_option('groups', type='set', elements='str')
+)
+
+OPTION_HEALTHCHECK = (
+ OptionGroup(preprocess=_preprocess_healthcheck)
+ .add_option('healthcheck', type='dict', ansible_suboptions=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ ))
+)
+
+OPTION_HOSTNAME = (
+ OptionGroup()
+ .add_option('hostname', type='str')
+)
+
+OPTION_IMAGE = (
+ OptionGroup(preprocess=_preprocess_networks)
+ .add_option('image', type='str')
+)
+
+OPTION_INIT = (
+ OptionGroup()
+ .add_option('init', type='bool')
+)
+
+OPTION_IPC_MODE = (
+ OptionGroup()
+ .add_option('ipc_mode', type='str')
+)
+
+OPTION_KERNEL_MEMORY = (
+ OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='kernel_memory'))
+ .add_option('kernel_memory', type='int', ansible_type='str')
+)
+
+OPTION_LABELS = (
+ OptionGroup()
+ .add_option('labels', type='dict', needs_no_suboptions=True)
+)
+
+OPTION_LINKS = (
+ OptionGroup()
+ .add_option('links', type='set', elements='list', ansible_elements='str')
+)
+
+OPTION_LOG_DRIVER_OPTIONS = (
+ OptionGroup(preprocess=_preprocess_log, ansible_required_by={'log_options': ['log_driver']})
+ .add_option('log_driver', type='str')
+ .add_option('log_options', type='dict', ansible_aliases=['log_opt'], needs_no_suboptions=True)
+)
+
+OPTION_MAC_ADDRESS = (
+ OptionGroup(preprocess=_preprocess_mac_address)
+ .add_option('mac_address', type='str')
+)
+
+OPTION_MEMORY = (
+ OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory'))
+ .add_option('memory', type='int', ansible_type='str')
+)
+
+OPTION_MEMORY_RESERVATION = (
+ OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_reservation'))
+ .add_option('memory_reservation', type='int', ansible_type='str')
+)
+
+OPTION_MEMORY_SWAP = (
+ OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='memory_swap', unlimited_value=-1))
+ .add_option('memory_swap', type='int', ansible_type='str')
+)
+
+OPTION_MEMORY_SWAPPINESS = (
+ OptionGroup()
+ .add_option('memory_swappiness', type='int')
+)
+
+OPTION_STOP_TIMEOUT = (
+ OptionGroup()
+ .add_option('stop_timeout', type='int', default_comparison='ignore')
+)
+
+OPTION_NETWORK = (
+ OptionGroup(preprocess=_preprocess_networks)
+ .add_option('network_mode', type='str')
+ .add_option('networks', type='set', elements='dict', ansible_suboptions=dict(
+ name=dict(type='str', required=True),
+ ipv4_address=dict(type='str'),
+ ipv6_address=dict(type='str'),
+ aliases=dict(type='list', elements='str'),
+ links=dict(type='list', elements='str'),
+ ))
+)
+
+OPTION_OOM_KILLER = (
+ OptionGroup()
+ .add_option('oom_killer', type='bool')
+)
+
+OPTION_OOM_SCORE_ADJ = (
+ OptionGroup()
+ .add_option('oom_score_adj', type='int')
+)
+
+OPTION_PID_MODE = (
+ OptionGroup()
+ .add_option('pid_mode', type='str')
+)
+
+OPTION_PIDS_LIMIT = (
+ OptionGroup()
+ .add_option('pids_limit', type='int')
+)
+
+OPTION_PLATFORM = (
+ OptionGroup()
+ .add_option('platform', type='str')
+)
+
+OPTION_PRIVILEGED = (
+ OptionGroup()
+ .add_option('privileged', type='bool')
+)
+
+OPTION_READ_ONLY = (
+ OptionGroup()
+ .add_option('read_only', type='bool')
+)
+
+OPTION_RESTART_POLICY = (
+ OptionGroup(ansible_required_by={'restart_retries': ['restart_policy']})
+ .add_option('restart_policy', type='str', ansible_choices=['no', 'on-failure', 'always', 'unless-stopped'])
+ .add_option('restart_retries', type='int')
+)
+
+OPTION_RUNTIME = (
+ OptionGroup()
+ .add_option('runtime', type='str')
+)
+
+OPTION_SECURITY_OPTS = (
+ OptionGroup()
+ .add_option('security_opts', type='set', elements='str')
+)
+
+OPTION_SHM_SIZE = (
+ OptionGroup(preprocess=partial(_preprocess_convert_to_bytes, name='shm_size'))
+ .add_option('shm_size', type='int', ansible_type='str')
+)
+
+OPTION_STOP_SIGNAL = (
+ OptionGroup()
+ .add_option('stop_signal', type='str')
+)
+
+OPTION_STORAGE_OPTS = (
+ OptionGroup()
+ .add_option('storage_opts', type='dict', needs_no_suboptions=True)
+)
+
+OPTION_SYSCTLS = (
+ OptionGroup(preprocess=_preprocess_sysctls)
+ .add_option('sysctls', type='dict', needs_no_suboptions=True)
+)
+
+OPTION_TMPFS = (
+ OptionGroup(preprocess=_preprocess_tmpfs)
+ .add_option('tmpfs', type='dict', ansible_type='list', ansible_elements='str')
+)
+
+OPTION_TTY = (
+ OptionGroup()
+ .add_option('tty', type='bool')
+)
+
+OPTION_ULIMITS = (
+ OptionGroup(preprocess=_preprocess_ulimits)
+ .add_option('ulimits', type='set', elements='dict', ansible_elements='str')
+)
+
+OPTION_USER = (
+ OptionGroup()
+ .add_option('user', type='str')
+)
+
+OPTION_USERNS_MODE = (
+ OptionGroup()
+ .add_option('userns_mode', type='str')
+)
+
+OPTION_UTS = (
+ OptionGroup()
+ .add_option('uts', type='str')
+)
+
+OPTION_VOLUME_DRIVER = (
+ OptionGroup()
+ .add_option('volume_driver', type='str')
+)
+
+OPTION_VOLUMES_FROM = (
+ OptionGroup()
+ .add_option('volumes_from', type='set', elements='str')
+)
+
+OPTION_WORKING_DIR = (
+ OptionGroup()
+ .add_option('working_dir', type='str')
+)
+
+OPTION_MOUNTS_VOLUMES = (
+ OptionGroup(preprocess=_preprocess_mounts)
+ .add_option('mounts', type='set', elements='dict', ansible_suboptions=dict(
+ target=dict(type='str', required=True),
+ source=dict(type='str'),
+ type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'),
+ read_only=dict(type='bool'),
+ consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']),
+ propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']),
+ no_copy=dict(type='bool'),
+ labels=dict(type='dict'),
+ volume_driver=dict(type='str'),
+ volume_options=dict(type='dict'),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='str'),
+ ))
+ .add_option('volumes', type='set', elements='str')
+ .add_option('volume_binds', type='set', elements='str', not_an_ansible_option=True, copy_comparison_from='volumes')
+)
+
+OPTION_PORTS = (
+ OptionGroup(preprocess=_preprocess_ports)
+ .add_option('exposed_ports', type='set', elements='str', ansible_aliases=['exposed', 'expose'])
+ .add_option('publish_all_ports', type='bool')
+ .add_option('published_ports', type='dict', ansible_type='list', ansible_elements='str', ansible_aliases=['ports'])
+ .add_option('ports', type='set', elements='str', not_an_ansible_option=True, default_comparison='ignore')
+)
+
+OPTIONS = [
+ OPTION_AUTO_REMOVE,
+ OPTION_BLKIO_WEIGHT,
+ OPTION_CAPABILITIES,
+ OPTION_CAP_DROP,
+ OPTION_CGROUP_NS_MODE,
+ OPTION_CGROUP_PARENT,
+ OPTION_COMMAND,
+ OPTION_CPU_PERIOD,
+ OPTION_CPU_QUOTA,
+ OPTION_CPUSET_CPUS,
+ OPTION_CPUSET_MEMS,
+ OPTION_CPU_SHARES,
+ OPTION_ENTRYPOINT,
+ OPTION_CPUS,
+ OPTION_DETACH_INTERACTIVE,
+ OPTION_DEVICES,
+ OPTION_DEVICE_READ_BPS,
+ OPTION_DEVICE_WRITE_BPS,
+ OPTION_DEVICE_READ_IOPS,
+ OPTION_DEVICE_WRITE_IOPS,
+ OPTION_DEVICE_REQUESTS,
+ OPTION_DNS_SERVERS,
+ OPTION_DNS_OPTS,
+ OPTION_DNS_SEARCH_DOMAINS,
+ OPTION_DOMAINNAME,
+ OPTION_ENVIRONMENT,
+ OPTION_ETC_HOSTS,
+ OPTION_GROUPS,
+ OPTION_HEALTHCHECK,
+ OPTION_HOSTNAME,
+ OPTION_IMAGE,
+ OPTION_INIT,
+ OPTION_IPC_MODE,
+ OPTION_KERNEL_MEMORY,
+ OPTION_LABELS,
+ OPTION_LINKS,
+ OPTION_LOG_DRIVER_OPTIONS,
+ OPTION_MAC_ADDRESS,
+ OPTION_MEMORY,
+ OPTION_MEMORY_RESERVATION,
+ OPTION_MEMORY_SWAP,
+ OPTION_MEMORY_SWAPPINESS,
+ OPTION_STOP_TIMEOUT,
+ OPTION_NETWORK,
+ OPTION_OOM_KILLER,
+ OPTION_OOM_SCORE_ADJ,
+ OPTION_PID_MODE,
+ OPTION_PIDS_LIMIT,
+ OPTION_PLATFORM,
+ OPTION_PRIVILEGED,
+ OPTION_READ_ONLY,
+ OPTION_RESTART_POLICY,
+ OPTION_RUNTIME,
+ OPTION_SECURITY_OPTS,
+ OPTION_SHM_SIZE,
+ OPTION_STOP_SIGNAL,
+ OPTION_STORAGE_OPTS,
+ OPTION_SYSCTLS,
+ OPTION_TMPFS,
+ OPTION_TTY,
+ OPTION_ULIMITS,
+ OPTION_USER,
+ OPTION_USERNS_MODE,
+ OPTION_UTS,
+ OPTION_VOLUME_DRIVER,
+ OPTION_VOLUMES_FROM,
+ OPTION_WORKING_DIR,
+ OPTION_MOUNTS_VOLUMES,
+ OPTION_PORTS,
+]
diff --git a/ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py b/ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py
new file mode 100644
index 000000000..cccf72df4
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py
@@ -0,0 +1,1353 @@
+# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import json
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native, to_text
+from ansible.module_utils.common.text.formatters import human_to_bytes
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.module_container.base import (
+ OPTION_AUTO_REMOVE,
+ OPTION_BLKIO_WEIGHT,
+ OPTION_CAPABILITIES,
+ OPTION_CAP_DROP,
+ OPTION_CGROUP_NS_MODE,
+ OPTION_CGROUP_PARENT,
+ OPTION_COMMAND,
+ OPTION_CPU_PERIOD,
+ OPTION_CPU_QUOTA,
+ OPTION_CPUSET_CPUS,
+ OPTION_CPUSET_MEMS,
+ OPTION_CPU_SHARES,
+ OPTION_ENTRYPOINT,
+ OPTION_CPUS,
+ OPTION_DETACH_INTERACTIVE,
+ OPTION_DEVICES,
+ OPTION_DEVICE_READ_BPS,
+ OPTION_DEVICE_WRITE_BPS,
+ OPTION_DEVICE_READ_IOPS,
+ OPTION_DEVICE_WRITE_IOPS,
+ OPTION_DEVICE_REQUESTS,
+ OPTION_DNS_SERVERS,
+ OPTION_DNS_OPTS,
+ OPTION_DNS_SEARCH_DOMAINS,
+ OPTION_DOMAINNAME,
+ OPTION_ENVIRONMENT,
+ OPTION_ETC_HOSTS,
+ OPTION_GROUPS,
+ OPTION_HEALTHCHECK,
+ OPTION_HOSTNAME,
+ OPTION_IMAGE,
+ OPTION_INIT,
+ OPTION_IPC_MODE,
+ OPTION_KERNEL_MEMORY,
+ OPTION_LABELS,
+ OPTION_LINKS,
+ OPTION_LOG_DRIVER_OPTIONS,
+ OPTION_MAC_ADDRESS,
+ OPTION_MEMORY,
+ OPTION_MEMORY_RESERVATION,
+ OPTION_MEMORY_SWAP,
+ OPTION_MEMORY_SWAPPINESS,
+ OPTION_STOP_TIMEOUT,
+ OPTION_NETWORK,
+ OPTION_OOM_KILLER,
+ OPTION_OOM_SCORE_ADJ,
+ OPTION_PID_MODE,
+ OPTION_PIDS_LIMIT,
+ OPTION_PLATFORM,
+ OPTION_PRIVILEGED,
+ OPTION_READ_ONLY,
+ OPTION_RESTART_POLICY,
+ OPTION_RUNTIME,
+ OPTION_SECURITY_OPTS,
+ OPTION_SHM_SIZE,
+ OPTION_STOP_SIGNAL,
+ OPTION_STORAGE_OPTS,
+ OPTION_SYSCTLS,
+ OPTION_TMPFS,
+ OPTION_TTY,
+ OPTION_ULIMITS,
+ OPTION_USER,
+ OPTION_USERNS_MODE,
+ OPTION_UTS,
+ OPTION_VOLUME_DRIVER,
+ OPTION_VOLUMES_FROM,
+ OPTION_WORKING_DIR,
+ OPTION_MOUNTS_VOLUMES,
+ OPTION_PORTS,
+ OPTIONS,
+ Engine,
+ EngineDriver,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ normalize_healthcheck_test,
+ omit_none_from_dict,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
+ convert_port_bindings,
+ normalize_links,
+)
+
+
+_DEFAULT_IP_REPLACEMENT_STRING = '[[DEFAULT_IP:iewahhaeB4Sae6Aen8IeShairoh4zeph7xaekoh8Geingunaesaeweiy3ooleiwi]]'
+
+
+_MOUNT_OPTION_TYPES = dict(
+ volume_driver='volume',
+ volume_options='volume',
+ propagation='bind',
+ no_copy='volume',
+ labels='volume',
+ tmpfs_size='tmpfs',
+ tmpfs_mode='tmpfs',
+)
+
+
+def _get_ansible_type(type):
+ if type == 'set':
+ return 'list'
+ if type not in ('list', 'dict', 'bool', 'int', 'float', 'str'):
+ raise Exception('Invalid type "%s"' % (type, ))
+ return type
+
+
+_SENTRY = object()
+
+
+class DockerAPIEngineDriver(EngineDriver):
+ name = 'docker_api'
+
+ def setup(self, argument_spec, mutually_exclusive=None, required_together=None, required_one_of=None, required_if=None, required_by=None):
+ argument_spec = argument_spec or {}
+ mutually_exclusive = mutually_exclusive or []
+ required_together = required_together or []
+ required_one_of = required_one_of or []
+ required_if = required_if or []
+ required_by = required_by or {}
+
+ active_options = []
+ option_minimal_versions = {}
+ for options in OPTIONS:
+ if not options.supports_engine(self.name):
+ continue
+
+ mutually_exclusive.extend(options.ansible_mutually_exclusive)
+ required_together.extend(options.ansible_required_together)
+ required_one_of.extend(options.ansible_required_one_of)
+ required_if.extend(options.ansible_required_if)
+ required_by.update(options.ansible_required_by)
+ argument_spec.update(options.argument_spec)
+
+ engine = options.get_engine(self.name)
+ if engine.min_api_version is not None:
+ for option in options.options:
+ if not option.not_an_ansible_option:
+ option_minimal_versions[option.name] = {'docker_api_version': engine.min_api_version}
+
+ active_options.append(options)
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ required_together=required_together,
+ required_one_of=required_one_of,
+ required_if=required_if,
+ required_by=required_by,
+ option_minimal_versions=option_minimal_versions,
+ supports_check_mode=True,
+ )
+
+ return client.module, active_options, client
+
+ def get_api_version(self, client):
+ return client.docker_api_version
+
+ def get_container_id(self, container):
+ return container['Id']
+
+ def get_image_from_container(self, container):
+ return container['Image']
+
+ def get_image_name_from_container(self, container):
+ return container['Config'].get('Image')
+
+ def is_container_removing(self, container):
+ if container.get('State'):
+ return container['State'].get('Status') == 'removing'
+ return False
+
+ def is_container_running(self, container):
+ if container.get('State'):
+ if container['State'].get('Running') and not container['State'].get('Ghost', False):
+ return True
+ return False
+
+ def is_container_paused(self, container):
+ if container.get('State'):
+ return container['State'].get('Paused', False)
+ return False
+
+ def inspect_container_by_name(self, client, container_name):
+ return client.get_container(container_name)
+
+ def inspect_container_by_id(self, client, container_id):
+ return client.get_container_by_id(container_id)
+
+ def inspect_image_by_id(self, client, image_id):
+ return client.find_image_by_id(image_id)
+
+ def inspect_image_by_name(self, client, repository, tag):
+ return client.find_image(repository, tag)
+
+ def pull_image(self, client, repository, tag, platform=None):
+ return client.pull_image(repository, tag, platform=platform)
+
+ def pause_container(self, client, container_id):
+ client.post_call('/containers/{0}/pause', container_id)
+
+ def unpause_container(self, client, container_id):
+ client.post_call('/containers/{0}/unpause', container_id)
+
+ def disconnect_container_from_network(self, client, container_id, network_id):
+ client.post_json('/networks/{0}/disconnect', network_id, data={'Container': container_id})
+
+ def connect_container_to_network(self, client, container_id, network_id, parameters=None):
+ parameters = (parameters or {}).copy()
+ params = {}
+ for para, dest_para in {'ipv4_address': 'IPv4Address', 'ipv6_address': 'IPv6Address', 'links': 'Links', 'aliases': 'Aliases'}.items():
+ value = parameters.pop(para, None)
+ if value:
+ if para == 'links':
+ value = normalize_links(value)
+ params[dest_para] = value
+ if parameters:
+ raise Exception(
+ 'Unknown parameter(s) for connect_container_to_network for Docker API driver: %s' % (', '.join(['"%s"' % p for p in sorted(parameters)])))
+ ipam_config = {}
+ for param in ('IPv4Address', 'IPv6Address'):
+ if param in params:
+ ipam_config[param] = params.pop(param)
+ if ipam_config:
+ params['IPAMConfig'] = ipam_config
+ data = {
+ 'Container': container_id,
+ 'EndpointConfig': params,
+ }
+ client.post_json('/networks/{0}/connect', network_id, data=data)
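+        # Illustrative example (hypothetical values): parameters={'ipv4_address':
+        # '172.20.0.10', 'aliases': ['web']} is translated into the request body
+        # {'Container': container_id, 'EndpointConfig': {'Aliases': ['web'],
+        # 'IPAMConfig': {'IPv4Address': '172.20.0.10'}}}.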
+
+ def create_container(self, client, container_name, create_parameters):
+ params = {'name': container_name}
+ if 'platform' in create_parameters:
+ params['platform'] = create_parameters.pop('platform')
+ new_container = client.post_json_to_json('/containers/create', data=create_parameters, params=params)
+ client.report_warnings(new_container)
+ return new_container['Id']
+
+ def start_container(self, client, container_id):
+ client.post_json('/containers/{0}/start', container_id)
+
+ def wait_for_container(self, client, container_id, timeout=None):
+ return client.post_json_to_json('/containers/{0}/wait', container_id, timeout=timeout)['StatusCode']
+
+ def get_container_output(self, client, container_id):
+ config = client.get_json('/containers/{0}/json', container_id)
+ logging_driver = config['HostConfig']['LogConfig']['Type']
+ if logging_driver in ('json-file', 'journald', 'local'):
+ params = {
+ 'stderr': 1,
+ 'stdout': 1,
+ 'timestamps': 0,
+ 'follow': 0,
+ 'tail': 'all',
+ }
+ res = client._get(client._url('/containers/{0}/logs', container_id), params=params)
+ output = client._get_result_tty(False, res, config['Config']['Tty'])
+ return output, True
+ else:
+ return "Result logged using `%s` driver" % logging_driver, False
+
+ def update_container(self, client, container_id, update_parameters):
+ result = client.post_json_to_json('/containers/{0}/update', container_id, data=update_parameters)
+ client.report_warnings(result)
+
+ def restart_container(self, client, container_id, timeout=None):
+ client_timeout = client.timeout
+ if client_timeout is not None:
+ client_timeout += timeout or 10
+ client.post_call('/containers/{0}/restart', container_id, params={'t': timeout}, timeout=client_timeout)
+
+ def kill_container(self, client, container_id, kill_signal=None):
+ params = {}
+ if kill_signal is not None:
+ params['signal'] = kill_signal
+ client.post_call('/containers/{0}/kill', container_id, params=params)
+
+ def stop_container(self, client, container_id, timeout=None):
+ if timeout:
+ params = {'t': timeout}
+ else:
+ params = {}
+ timeout = 10
+ client_timeout = client.timeout
+ if client_timeout is not None:
+ client_timeout += timeout
+ count = 0
+ while True:
+ try:
+ client.post_call('/containers/{0}/stop', container_id, params=params, timeout=client_timeout)
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+                    # New docker daemon versions do not allow paused containers to be
+                    # stopped or killed. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ raise Exception('%s [tried to unpause three times]' % to_native(exc))
+ count += 1
+ # Unpause
+ try:
+ self.unpause_container(client, container_id)
+ except Exception as exc2:
+ raise Exception('%s [while unpausing]' % to_native(exc2))
+ # Now try again
+ continue
+ raise
+ # We only loop when explicitly requested by 'continue'
+ break
+
+ def remove_container(self, client, container_id, remove_volumes=False, link=False, force=False):
+ params = {'v': remove_volumes, 'link': link, 'force': force}
+ count = 0
+ while True:
+ try:
+ client.delete_call('/containers/{0}', container_id, params=params)
+ except NotFound as dummy:
+ pass
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+ # New docker daemon versions do not allow containers to be removed
+ # if they are paused. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ raise Exception('%s [tried to unpause three times]' % to_native(exc))
+ count += 1
+ # Unpause
+ try:
+ self.unpause_container(client, container_id)
+ except Exception as exc2:
+ raise Exception('%s [while unpausing]' % to_native(exc2))
+ # Now try again
+ continue
+ if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
+ pass
+ else:
+ raise
+ # We only loop when explicitly requested by 'continue'
+ break
+
+ def run(self, runner, client):
+ try:
+ runner()
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+class DockerAPIEngine(Engine):
+ def __init__(
+ self,
+ get_value,
+ preprocess_value=None,
+ get_expected_values=None,
+ ignore_mismatching_result=None,
+ set_value=None,
+ update_value=None,
+ can_set_value=None,
+ can_update_value=None,
+ min_api_version=None,
+ ):
+ self.min_api_version = min_api_version
+ self.min_api_version_obj = None if min_api_version is None else LooseVersion(min_api_version)
+ self.get_value = get_value
+ self.set_value = set_value
+ self.get_expected_values = get_expected_values or (lambda module, client, api_version, options, image, values: values)
+ self.ignore_mismatching_result = ignore_mismatching_result or \
+ (lambda module, client, api_version, option, image, container_value, expected_value: False)
+ self.preprocess_value = preprocess_value or (lambda module, client, api_version, options, values: values)
+ self.update_value = update_value
+ self.can_set_value = can_set_value or (lambda api_version: set_value is not None)
+ self.can_update_value = can_update_value or (lambda api_version: update_value is not None)
+
+ @classmethod
+ def config_value(
+ cls,
+ config_name,
+ postprocess_for_get=None,
+ preprocess_for_set=None,
+ get_expected_value=None,
+ ignore_mismatching_result=None,
+ min_api_version=None,
+ preprocess_value=None,
+ update_parameter=None,
+ ):
+ def preprocess_value_(module, client, api_version, options, values):
+ if len(options) != 1:
+ raise AssertionError('config_value can only be used for a single option')
+ if preprocess_value is not None and options[0].name in values:
+ value = preprocess_value(module, client, api_version, values[options[0].name])
+ if value is None:
+ del values[options[0].name]
+ else:
+ values[options[0].name] = value
+ return values
+
+ def get_value(module, container, api_version, options):
+ if len(options) != 1:
+ raise AssertionError('config_value can only be used for a single option')
+ value = container['Config'].get(config_name, _SENTRY)
+ if postprocess_for_get:
+ value = postprocess_for_get(module, api_version, value, _SENTRY)
+ if value is _SENTRY:
+ return {}
+ return {options[0].name: value}
+
+ get_expected_values_ = None
+ if get_expected_value:
+ def get_expected_values_(module, client, api_version, options, image, values):
+ if len(options) != 1:
+                    raise AssertionError('config_value can only be used for a single option')
+ value = values.get(options[0].name, _SENTRY)
+ value = get_expected_value(module, client, api_version, image, value, _SENTRY)
+ if value is _SENTRY:
+ return values
+ return {options[0].name: value}
+
+ def set_value(module, data, api_version, options, values):
+ if len(options) != 1:
+ raise AssertionError('config_value can only be used for a single option')
+ if options[0].name not in values:
+ return
+ value = values[options[0].name]
+ if preprocess_for_set:
+ value = preprocess_for_set(module, api_version, value)
+ data[config_name] = value
+
+ update_value = None
+ if update_parameter:
+ def update_value(module, data, api_version, options, values):
+ if len(options) != 1:
+ raise AssertionError('update_parameter can only be used for a single option')
+ if options[0].name not in values:
+ return
+ value = values[options[0].name]
+ if preprocess_for_set:
+ value = preprocess_for_set(module, api_version, value)
+ data[update_parameter] = value
+
+ return cls(
+ get_value=get_value,
+ preprocess_value=preprocess_value_,
+ get_expected_values=get_expected_values_,
+ ignore_mismatching_result=ignore_mismatching_result,
+ set_value=set_value,
+ min_api_version=min_api_version,
+ update_value=update_value,
+ )
+
+ @classmethod
+ def host_config_value(
+ cls,
+ host_config_name,
+ postprocess_for_get=None,
+ preprocess_for_set=None,
+ get_expected_value=None,
+ ignore_mismatching_result=None,
+ min_api_version=None,
+ preprocess_value=None,
+ update_parameter=None,
+ ):
+ def preprocess_value_(module, client, api_version, options, values):
+ if len(options) != 1:
+ raise AssertionError('host_config_value can only be used for a single option')
+ if preprocess_value is not None and options[0].name in values:
+ value = preprocess_value(module, client, api_version, values[options[0].name])
+ if value is None:
+ del values[options[0].name]
+ else:
+ values[options[0].name] = value
+ return values
+
+ def get_value(module, container, api_version, options):
+ if len(options) != 1:
+ raise AssertionError('host_config_value can only be used for a single option')
+ value = container['HostConfig'].get(host_config_name, _SENTRY)
+ if postprocess_for_get:
+ value = postprocess_for_get(module, api_version, value, _SENTRY)
+ if value is _SENTRY:
+ return {}
+ return {options[0].name: value}
+
+ get_expected_values_ = None
+ if get_expected_value:
+ def get_expected_values_(module, client, api_version, options, image, values):
+ if len(options) != 1:
+ raise AssertionError('host_config_value can only be used for a single option')
+ value = values.get(options[0].name, _SENTRY)
+ value = get_expected_value(module, client, api_version, image, value, _SENTRY)
+ if value is _SENTRY:
+ return values
+ return {options[0].name: value}
+
+ def set_value(module, data, api_version, options, values):
+ if len(options) != 1:
+ raise AssertionError('host_config_value can only be used for a single option')
+ if options[0].name not in values:
+ return
+ if 'HostConfig' not in data:
+ data['HostConfig'] = {}
+ value = values[options[0].name]
+ if preprocess_for_set:
+ value = preprocess_for_set(module, api_version, value)
+ data['HostConfig'][host_config_name] = value
+
+ update_value = None
+ if update_parameter:
+ def update_value(module, data, api_version, options, values):
+ if len(options) != 1:
+ raise AssertionError('update_parameter can only be used for a single option')
+ if options[0].name not in values:
+ return
+ value = values[options[0].name]
+ if preprocess_for_set:
+ value = preprocess_for_set(module, api_version, value)
+ data[update_parameter] = value
+
+ return cls(
+ get_value=get_value,
+ preprocess_value=preprocess_value_,
+ get_expected_values=get_expected_values_,
+ ignore_mismatching_result=ignore_mismatching_result,
+ set_value=set_value,
+ min_api_version=min_api_version,
+ update_value=update_value,
+ )
+
+
+def _is_volume_permissions(mode):
+ for part in mode.split(','):
+ if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'):
+ return False
+ return True
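+# For example, _is_volume_permissions('ro,Z') is True, while
+# _is_volume_permissions('ro,bogus') is False.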
+
+
+def _normalize_port(port):
+ if '/' not in port:
+ return port + '/tcp'
+ return port
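+# For example, _normalize_port('8080') returns '8080/tcp', while
+# _normalize_port('53/udp') is returned unchanged.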
+
+
+def _get_default_host_ip(module, client):
+ if module.params['default_host_ip'] is not None:
+ return module.params['default_host_ip']
+ ip = '0.0.0.0'
+ for network_data in module.params['networks'] or []:
+ if network_data.get('name'):
+ network = client.get_network(network_data['name'])
+ if network is None:
+ client.fail(
+ "Cannot inspect the network '{0}' to determine the default IP".format(network_data['name']),
+ )
+ if network.get('Driver') == 'bridge' and network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
+ ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
+ break
+ return ip
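+# Illustrative example (hypothetical network): if the first named network is a
+# bridge whose option 'com.docker.network.bridge.host_binding_ipv4' is set to
+# '192.168.0.2', that address is returned instead of the default '0.0.0.0'.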
+
+
+def _get_value_detach_interactive(module, container, api_version, options):
+ attach_stdin = container['Config'].get('OpenStdin')
+ attach_stderr = container['Config'].get('AttachStderr')
+ attach_stdout = container['Config'].get('AttachStdout')
+ return {
+ 'interactive': bool(attach_stdin),
+ 'detach': not (attach_stderr and attach_stdout),
+ }
+
+
+def _set_value_detach_interactive(module, data, api_version, options, values):
+ interactive = values.get('interactive')
+ detach = values.get('detach')
+
+ data['AttachStdout'] = False
+ data['AttachStderr'] = False
+ data['AttachStdin'] = False
+ data['StdinOnce'] = False
+ data['OpenStdin'] = interactive
+ if not detach:
+ data['AttachStdout'] = True
+ data['AttachStderr'] = True
+ if interactive:
+ data['AttachStdin'] = True
+ data['StdinOnce'] = True
+
+
+def _get_expected_env_value(module, client, api_version, image, value, sentry):
+ expected_env = {}
+ if image and image['Config'].get('Env'):
+ for env_var in image['Config']['Env']:
+ parts = env_var.split('=', 1)
+ expected_env[parts[0]] = parts[1]
+ if value and value is not sentry:
+ for env_var in value:
+ parts = env_var.split('=', 1)
+ expected_env[parts[0]] = parts[1]
+ param_env = []
+ for key, env_value in expected_env.items():
+ param_env.append("%s=%s" % (key, env_value))
+ return param_env
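+# For example, with image Env ['PATH=/usr/local/bin'] and value ['FOO=bar'], the
+# expected environment is ['PATH=/usr/local/bin', 'FOO=bar']; module-provided
+# values override image values for the same variable name.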
+
+
+def _preprocess_cpus(module, client, api_version, value):
+ if value is not None:
+ value = int(round(value * 1E9))
+ return value
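+# For example, cpus=0.5 is converted to 500000000 NanoCPUs (0.5 * 1e9).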
+
+
+def _preprocess_devices(module, client, api_version, value):
+ if not value:
+ return value
+ expected_devices = []
+ for device in value:
+ parts = device.split(':')
+ if len(parts) == 1:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[0],
+ PathOnHost=parts[0]
+ ))
+ elif len(parts) == 2:
+ parts = device.split(':')
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ )
+ )
+ else:
+ expected_devices.append(
+ dict(
+ CgroupPermissions=parts[2],
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ ))
+ return expected_devices
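+# For example, '/dev/sda' maps the device to the same path in the container with
+# 'rwm' cgroup permissions; '/dev/sda:/dev/xvda' maps host '/dev/sda' to container
+# '/dev/xvda'; '/dev/sda:/dev/xvda:r' additionally restricts the permissions to 'r'.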
+
+
+def _preprocess_rate_bps(module, client, api_version, value):
+ if not value:
+ return value
+ devices = []
+ for device in value:
+ devices.append({
+ 'Path': device['path'],
+ 'Rate': human_to_bytes(device['rate']),
+ })
+ return devices
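+# Illustrative example (assuming human_to_bytes('20M') yields 20 MiB):
+# [{'path': '/dev/sda', 'rate': '20M'}] becomes
+# [{'Path': '/dev/sda', 'Rate': 20971520}].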
+
+
+def _preprocess_rate_iops(module, client, api_version, value):
+ if not value:
+ return value
+ devices = []
+ for device in value:
+ devices.append({
+ 'Path': device['path'],
+ 'Rate': device['rate'],
+ })
+ return devices
+
+
+def _preprocess_device_requests(module, client, api_version, value):
+ if not value:
+ return value
+ device_requests = []
+ for dr in value:
+ device_requests.append({
+ 'Driver': dr['driver'],
+ 'Count': dr['count'],
+ 'DeviceIDs': dr['device_ids'],
+ 'Capabilities': dr['capabilities'],
+ 'Options': dr['options'],
+ })
+ return device_requests
+
+
+def _preprocess_etc_hosts(module, client, api_version, value):
+ if value is None:
+ return value
+ results = []
+    for hostname, ip in value.items():
+        results.append('%s:%s' % (hostname, ip))
+ return results
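+# For example, {'db.example.com': '10.0.0.1'} becomes ['db.example.com:10.0.0.1'].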
+
+
+def _preprocess_healthcheck(module, client, api_version, value):
+ if value is None:
+ return value
+ if not value or not value.get('test'):
+ value = {'test': ['NONE']}
+ elif 'test' in value:
+ value['test'] = normalize_healthcheck_test(value['test'])
+ return omit_none_from_dict({
+ 'Test': value.get('test'),
+ 'Interval': value.get('interval'),
+ 'Timeout': value.get('timeout'),
+ 'StartPeriod': value.get('start_period'),
+ 'Retries': value.get('retries'),
+ })
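+# Illustrative sketch (assuming normalize_healthcheck_test wraps a plain string as
+# ['CMD-SHELL', <string>]): {'test': 'curl -f http://localhost/'} becomes
+# {'Test': ['CMD-SHELL', 'curl -f http://localhost/']}, while a missing or empty
+# test becomes {'Test': ['NONE']}, which disables any healthcheck from the image.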
+
+
+def _postprocess_healthcheck_get_value(module, api_version, value, sentry):
+ if value is None or value is sentry or value.get('Test') == ['NONE']:
+ return {'Test': ['NONE']}
+ return value
+
+
+def _preprocess_convert_to_bytes(module, values, name, unlimited_value=None):
+ if name not in values:
+ return values
+ try:
+ value = values[name]
+ if unlimited_value is not None and value in ('unlimited', str(unlimited_value)):
+ value = unlimited_value
+ else:
+ value = human_to_bytes(value)
+ values[name] = value
+ return values
+ except ValueError as exc:
+ module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc)))
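+# For example (with human_to_bytes semantics), values={'memory': '1G'} becomes
+# values={'memory': 1073741824} for name='memory'; with unlimited_value=-1, both
+# 'unlimited' and '-1' are converted to -1.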
+
+
+def _get_image_labels(image):
+ if not image:
+ return {}
+
+ # Can't use get('Labels', {}) because 'Labels' may be present and be None
+ return image['Config'].get('Labels') or {}
+
+
+def _get_expected_labels_value(module, client, api_version, image, value, sentry):
+ if value is sentry:
+ return sentry
+ expected_labels = {}
+ if module.params['image_label_mismatch'] == 'ignore':
+ expected_labels.update(dict(_get_image_labels(image)))
+ expected_labels.update(value)
+ return expected_labels
+
+
+def _preprocess_links(module, client, api_version, value):
+ if value is None:
+ return None
+
+ result = []
+ for link in value:
+ parsed_link = link.split(':', 1)
+ if len(parsed_link) == 2:
+ link, alias = parsed_link
+ else:
+ link, alias = parsed_link[0], parsed_link[0]
+ result.append('/%s:/%s/%s' % (link, module.params['name'], alias))
+
+ return result
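+# For example, with module parameter name='web', the link 'mydb:db' becomes
+# '/mydb:/web/db', and 'mydb' (no alias) becomes '/mydb:/web/mydb'.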
+
+
+def _ignore_mismatching_label_result(module, client, api_version, option, image, container_value, expected_value):
+ if option.comparison == 'strict' and module.params['image_label_mismatch'] == 'fail':
+        # If there are labels from the base image that should be removed and
+        # image_label_mismatch is 'fail', we want to raise an error.
+ image_labels = _get_image_labels(image)
+ would_remove_labels = []
+ labels_param = module.params['labels'] or {}
+ for label in image_labels:
+ if label not in labels_param:
+ # Format label for error message
+ would_remove_labels.append('"%s"' % (label, ))
+ if would_remove_labels:
+ msg = ("Some labels should be removed but are present in the base image. You can set image_label_mismatch to 'ignore' to ignore"
+ " this error. Labels: {0}")
+ client.fail(msg.format(', '.join(would_remove_labels)))
+ return False
+
+
+def _ignore_mismatching_network_result(module, client, api_version, option, image, container_value, expected_value):
+ # 'networks' is handled out-of-band
+ if option.name == 'networks':
+ return True
+ return False
+
+
+def _preprocess_network_values(module, client, api_version, options, values):
+ if 'networks' in values:
+ for network in values['networks']:
+ network['id'] = _get_network_id(module, client, network['name'])
+ if not network['id']:
+ client.fail("Parameter error: network named %s could not be found. Does it exist?" % (network['name'], ))
+
+ if 'network_mode' in values:
+ values['network_mode'] = _preprocess_container_names(module, client, api_version, values['network_mode'])
+
+ return values
+
+
+def _get_network_id(module, client, network_name):
+ try:
+ network_id = None
+ params = {'filters': json.dumps({'name': [network_name]})}
+ for network in client.get_json('/networks', params=params):
+ if network['Name'] == network_name:
+ network_id = network['Id']
+ break
+ return network_id
+ except Exception as exc:
+ client.fail("Error getting network id for %s - %s" % (network_name, to_native(exc)))
+
+
+def _get_values_network(module, container, api_version, options):
+ value = container['HostConfig'].get('NetworkMode', _SENTRY)
+ if value is _SENTRY:
+ return {}
+ return {'network_mode': value}
+
+
+def _set_values_network(module, data, api_version, options, values):
+ if 'network_mode' not in values:
+ return
+ if 'HostConfig' not in data:
+ data['HostConfig'] = {}
+ value = values['network_mode']
+ data['HostConfig']['NetworkMode'] = value
+
+
+def _get_values_mounts(module, container, api_version, options):
+ volumes = container['Config'].get('Volumes')
+ binds = container['HostConfig'].get('Binds')
+    # According to https://github.com/moby/moby/, support for HostConfig.Mounts
+    # has been included at least since v17.03.0-ce, which has API version 1.26.
+    # The previous tag, v1.9.1, has API version 1.21 and does not have
+    # HostConfig.Mounts. Whether API version 1.25 already supports it is unclear.
+ mounts = container['HostConfig'].get('Mounts')
+ if mounts is not None:
+ result = []
+ empty_dict = {}
+ for mount in mounts:
+ result.append({
+ 'type': mount.get('Type'),
+ 'source': mount.get('Source'),
+ 'target': mount.get('Target'),
+                'read_only': mount.get('ReadOnly', False),  # golang's omitempty drops False booleans, so the key can be absent
+ 'consistency': mount.get('Consistency'),
+ 'propagation': mount.get('BindOptions', empty_dict).get('Propagation'),
+ 'no_copy': mount.get('VolumeOptions', empty_dict).get('NoCopy', False),
+ 'labels': mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict),
+ 'volume_driver': mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name'),
+ 'volume_options': mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict),
+ 'tmpfs_size': mount.get('TmpfsOptions', empty_dict).get('SizeBytes'),
+ 'tmpfs_mode': mount.get('TmpfsOptions', empty_dict).get('Mode'),
+ })
+ mounts = result
+ result = {}
+ if volumes is not None:
+ result['volumes'] = volumes
+ if binds is not None:
+ result['volume_binds'] = binds
+ if mounts is not None:
+ result['mounts'] = mounts
+ return result
+
+
+def _get_bind_from_dict(volume_dict):
+ results = []
+ if volume_dict:
+ for host_path, config in volume_dict.items():
+ if isinstance(config, dict) and config.get('bind'):
+ container_path = config.get('bind')
+ mode = config.get('mode', 'rw')
+ results.append("%s:%s:%s" % (host_path, container_path, mode))
+ return results
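+# For example, {'/host/path': {'bind': '/container/path', 'mode': 'ro'}} yields
+# ['/host/path:/container/path:ro']; entries without a 'bind' key are skipped.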
+
+
+def _get_image_binds(volumes):
+ '''
+ Convert array of binds to array of strings with format host_path:container_path:mode
+
+ :param volumes: array of bind dicts
+ :return: array of strings
+ '''
+ results = []
+ if isinstance(volumes, dict):
+ results += _get_bind_from_dict(volumes)
+ elif isinstance(volumes, list):
+ for vol in volumes:
+ results += _get_bind_from_dict(vol)
+ return results
+
+
+def _get_expected_values_mounts(module, client, api_version, options, image, values):
+ expected_values = {}
+
+ # binds
+ if 'mounts' in values:
+ expected_values['mounts'] = values['mounts']
+
+ # volumes
+ expected_vols = dict()
+ if image and image['Config'].get('Volumes'):
+ expected_vols.update(image['Config'].get('Volumes'))
+ if 'volumes' in values:
+ for vol in values['volumes']:
+ # We only expect anonymous volumes to show up in the list
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not _is_volume_permissions(parts[1]):
+ continue
+ expected_vols[vol] = {}
+ if expected_vols:
+ expected_values['volumes'] = expected_vols
+
+ # binds
+ image_vols = []
+ if image:
+ image_vols = _get_image_binds(image['Config'].get('Volumes'))
+ param_vols = []
+ if 'volume_binds' in values:
+ param_vols = values['volume_binds']
+ expected_values['volume_binds'] = list(set(image_vols + param_vols))
+
+ return expected_values
+
+
+def _set_values_mounts(module, data, api_version, options, values):
+ if 'mounts' in values:
+ if 'HostConfig' not in data:
+ data['HostConfig'] = {}
+ mounts = []
+ for mount in values['mounts']:
+ mount_type = mount.get('type')
+ mount_res = {
+ 'Target': mount.get('target'),
+ 'Source': mount.get('source'),
+ 'Type': mount_type,
+ 'ReadOnly': mount.get('read_only'),
+ }
+ if 'consistency' in mount:
+ mount_res['Consistency'] = mount['consistency']
+ if mount_type == 'bind':
+ if 'propagation' in mount:
+ mount_res['BindOptions'] = {
+ 'Propagation': mount['propagation'],
+ }
+ if mount_type == 'volume':
+ volume_opts = {}
+ if mount.get('no_copy'):
+ volume_opts['NoCopy'] = True
+ if mount.get('labels'):
+ volume_opts['Labels'] = mount.get('labels')
+ if mount.get('volume_driver'):
+ driver_config = {
+ 'Name': mount.get('volume_driver'),
+ }
+ if mount.get('volume_options'):
+ driver_config['Options'] = mount.get('volume_options')
+ volume_opts['DriverConfig'] = driver_config
+ if volume_opts:
+ mount_res['VolumeOptions'] = volume_opts
+ if mount_type == 'tmpfs':
+ tmpfs_opts = {}
+ if mount.get('tmpfs_mode'):
+ tmpfs_opts['Mode'] = mount.get('tmpfs_mode')
+ if mount.get('tmpfs_size'):
+ tmpfs_opts['SizeBytes'] = mount.get('tmpfs_size')
+ if tmpfs_opts:
+ mount_res['TmpfsOptions'] = tmpfs_opts
+ mounts.append(mount_res)
+ data['HostConfig']['Mounts'] = mounts
+ if 'volumes' in values:
+ volumes = {}
+ for volume in values['volumes']:
+ # Only pass anonymous volumes to create container
+ if ':' in volume:
+ parts = volume.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not _is_volume_permissions(parts[1]):
+ continue
+ volumes[volume] = {}
+ data['Volumes'] = volumes
+ if 'volume_binds' in values:
+ if 'HostConfig' not in data:
+ data['HostConfig'] = {}
+ data['HostConfig']['Binds'] = values['volume_binds']
+
+
+def _get_values_log(module, container, api_version, options):
+ log_config = container['HostConfig'].get('LogConfig') or {}
+ return {
+ 'log_driver': log_config.get('Type'),
+ 'log_options': log_config.get('Config'),
+ }
+
+
+def _set_values_log(module, data, api_version, options, values):
+ if 'log_driver' not in values:
+ return
+ log_config = {
+ 'Type': values['log_driver'],
+ 'Config': values.get('log_options') or {},
+ }
+ if 'HostConfig' not in data:
+ data['HostConfig'] = {}
+ data['HostConfig']['LogConfig'] = log_config
+
+
+def _get_values_platform(module, container, api_version, options):
+ return {
+ 'platform': container.get('Platform'),
+ }
+
+
+def _set_values_platform(module, data, api_version, options, values):
+ if 'platform' in values:
+ data['platform'] = values['platform']
+
+
+def _get_values_restart(module, container, api_version, options):
+ restart_policy = container['HostConfig'].get('RestartPolicy') or {}
+ return {
+ 'restart_policy': restart_policy.get('Name'),
+ 'restart_retries': restart_policy.get('MaximumRetryCount'),
+ }
+
+
+def _set_values_restart(module, data, api_version, options, values):
+ if 'restart_policy' not in values:
+ return
+ restart_policy = {
+ 'Name': values['restart_policy'],
+ 'MaximumRetryCount': values.get('restart_retries'),
+ }
+ if 'HostConfig' not in data:
+ data['HostConfig'] = {}
+ data['HostConfig']['RestartPolicy'] = restart_policy
+
+
+def _update_value_restart(module, data, api_version, options, values):
+ if 'restart_policy' not in values:
+ return
+ data['RestartPolicy'] = {
+ 'Name': values['restart_policy'],
+ 'MaximumRetryCount': values.get('restart_retries'),
+ }
+
+
+def _get_values_ports(module, container, api_version, options):
+ host_config = container['HostConfig']
+ config = container['Config']
+
+ # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
+ if config.get('ExposedPorts') is not None:
+ expected_exposed = [_normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()]
+ else:
+ expected_exposed = []
+
+ return {
+ 'published_ports': host_config.get('PortBindings'),
+ 'exposed_ports': expected_exposed,
+ 'publish_all_ports': host_config.get('PublishAllPorts'),
+ }
+
+
+def _get_expected_values_ports(module, client, api_version, options, image, values):
+ expected_values = {}
+
+ if 'published_ports' in values:
+ expected_bound_ports = {}
+ for container_port, config in values['published_ports'].items():
+ if isinstance(container_port, int):
+ container_port = "%s/tcp" % container_port
+ if len(config) == 1:
+ if isinstance(config[0], int):
+ expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
+ elif isinstance(config[0], tuple):
+ expected_bound_ports[container_port] = []
+ for host_ip, host_port in config:
+ expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')})
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}]
+ expected_values['published_ports'] = expected_bound_ports
+
+ image_ports = []
+ if image:
+ image_exposed_ports = image['Config'].get('ExposedPorts') or {}
+ image_ports = [_normalize_port(p) for p in image_exposed_ports]
+ param_ports = []
+ if 'ports' in values:
+ param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in values['ports']]
+ result = list(set(image_ports + param_ports))
+ expected_values['exposed_ports'] = result
+
+ if 'publish_all_ports' in values:
+ expected_values['publish_all_ports'] = values['publish_all_ports']
+
+ return expected_values
+
+
+def _set_values_ports(module, data, api_version, options, values):
+ if 'ports' in values:
+ exposed_ports = {}
+ for port_definition in values['ports']:
+ port = port_definition
+ proto = 'tcp'
+ if isinstance(port_definition, tuple):
+ if len(port_definition) == 2:
+ proto = port_definition[1]
+ port = port_definition[0]
+ exposed_ports['%s/%s' % (port, proto)] = {}
+ data['ExposedPorts'] = exposed_ports
+ if 'published_ports' in values:
+ if 'HostConfig' not in data:
+ data['HostConfig'] = {}
+ data['HostConfig']['PortBindings'] = convert_port_bindings(values['published_ports'])
+ if 'publish_all_ports' in values and values['publish_all_ports']:
+ if 'HostConfig' not in data:
+ data['HostConfig'] = {}
+ data['HostConfig']['PublishAllPorts'] = values['publish_all_ports']
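+# For example, ports=[(8080, 'tcp'), 9000] yields
+# ExposedPorts={'8080/tcp': {}, '9000/tcp': {}}.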
+
+
+def _preprocess_value_ports(module, client, api_version, options, values):
+ if 'published_ports' not in values:
+ return values
+ found = False
+ for port_spec in values['published_ports'].values():
+ if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING:
+ found = True
+ break
+ if not found:
+ return values
+ default_ip = _get_default_host_ip(module, client)
+ for port, port_spec in values['published_ports'].items():
+ if port_spec[0] == _DEFAULT_IP_REPLACEMENT_STRING:
+ values['published_ports'][port] = tuple([default_ip] + list(port_spec[1:]))
+ return values
+
+
+def _preprocess_container_names(module, client, api_version, value):
+ if value is None or not value.startswith('container:'):
+ return value
+ container_name = value[len('container:'):]
+ # Try to inspect container to see whether this is an ID or a
+ # name (and in the latter case, retrieve its ID)
+ container = client.get_container(container_name)
+ if container is None:
+ # If we can't find the container, issue a warning and continue with
+ # what the user specified.
+ module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
+ return value
+ return 'container:{0}'.format(container['Id'])
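+# For example, 'container:web' is resolved to 'container:<ID of web>' when a
+# container named 'web' exists; otherwise a warning is issued and the value is
+# returned unchanged.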
+
+
+OPTION_AUTO_REMOVE.add_engine('docker_api', DockerAPIEngine.host_config_value('AutoRemove'))
+
+OPTION_BLKIO_WEIGHT.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioWeight', update_parameter='BlkioWeight'))
+
+OPTION_CAPABILITIES.add_engine('docker_api', DockerAPIEngine.host_config_value('CapAdd'))
+
+OPTION_CAP_DROP.add_engine('docker_api', DockerAPIEngine.host_config_value('CapDrop'))
+
+OPTION_CGROUP_NS_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('CgroupnsMode', min_api_version='1.41'))
+
+OPTION_CGROUP_PARENT.add_engine('docker_api', DockerAPIEngine.host_config_value('CgroupParent'))
+
+OPTION_COMMAND.add_engine('docker_api', DockerAPIEngine.config_value('Cmd'))
+
+OPTION_CPU_PERIOD.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuPeriod', update_parameter='CpuPeriod'))
+
+OPTION_CPU_QUOTA.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuQuota', update_parameter='CpuQuota'))
+
+OPTION_CPUSET_CPUS.add_engine('docker_api', DockerAPIEngine.host_config_value('CpusetCpus', update_parameter='CpusetCpus'))
+
+OPTION_CPUSET_MEMS.add_engine('docker_api', DockerAPIEngine.host_config_value('CpusetMems', update_parameter='CpusetMems'))
+
+OPTION_CPU_SHARES.add_engine('docker_api', DockerAPIEngine.host_config_value('CpuShares', update_parameter='CpuShares'))
+
+OPTION_ENTRYPOINT.add_engine('docker_api', DockerAPIEngine.config_value('Entrypoint'))
+
+OPTION_CPUS.add_engine('docker_api', DockerAPIEngine.host_config_value('NanoCpus', preprocess_value=_preprocess_cpus))
+
+OPTION_DETACH_INTERACTIVE.add_engine('docker_api', DockerAPIEngine(get_value=_get_value_detach_interactive, set_value=_set_value_detach_interactive))
+
+OPTION_DEVICES.add_engine('docker_api', DockerAPIEngine.host_config_value('Devices', preprocess_value=_preprocess_devices))
+
+OPTION_DEVICE_READ_BPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceReadBps', preprocess_value=_preprocess_rate_bps))
+
+OPTION_DEVICE_WRITE_BPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceWriteBps', preprocess_value=_preprocess_rate_bps))
+
+OPTION_DEVICE_READ_IOPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceReadIOps', preprocess_value=_preprocess_rate_iops))
+
+OPTION_DEVICE_WRITE_IOPS.add_engine('docker_api', DockerAPIEngine.host_config_value('BlkioDeviceWriteIOps', preprocess_value=_preprocess_rate_iops))
+
+OPTION_DEVICE_REQUESTS.add_engine('docker_api', DockerAPIEngine.host_config_value(
+ 'DeviceRequests', min_api_version='1.40', preprocess_value=_preprocess_device_requests))
+
+OPTION_DNS_SERVERS.add_engine('docker_api', DockerAPIEngine.host_config_value('Dns'))
+
+OPTION_DNS_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('DnsOptions'))
+
+OPTION_DNS_SEARCH_DOMAINS.add_engine('docker_api', DockerAPIEngine.host_config_value('DnsSearch'))
+
+OPTION_DOMAINNAME.add_engine('docker_api', DockerAPIEngine.config_value('Domainname'))
+
+OPTION_ENVIRONMENT.add_engine('docker_api', DockerAPIEngine.config_value('Env', get_expected_value=_get_expected_env_value))
+
+OPTION_ETC_HOSTS.add_engine('docker_api', DockerAPIEngine.host_config_value('ExtraHosts', preprocess_value=_preprocess_etc_hosts))
+
+OPTION_GROUPS.add_engine('docker_api', DockerAPIEngine.host_config_value('GroupAdd'))
+
+OPTION_HEALTHCHECK.add_engine('docker_api', DockerAPIEngine.config_value(
+ 'Healthcheck', preprocess_value=_preprocess_healthcheck, postprocess_for_get=_postprocess_healthcheck_get_value))
+
+OPTION_HOSTNAME.add_engine('docker_api', DockerAPIEngine.config_value('Hostname'))
+
+OPTION_IMAGE.add_engine('docker_api', DockerAPIEngine.config_value(
+ 'Image', ignore_mismatching_result=lambda module, client, api_version, option, image, container_value, expected_value: True))
+
+OPTION_INIT.add_engine('docker_api', DockerAPIEngine.host_config_value('Init'))
+
+OPTION_IPC_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('IpcMode', preprocess_value=_preprocess_container_names))
+
+OPTION_KERNEL_MEMORY.add_engine('docker_api', DockerAPIEngine.host_config_value('KernelMemory', update_parameter='KernelMemory'))
+
+OPTION_LABELS.add_engine('docker_api', DockerAPIEngine.config_value(
+ 'Labels', get_expected_value=_get_expected_labels_value, ignore_mismatching_result=_ignore_mismatching_label_result))
+
+OPTION_LINKS.add_engine('docker_api', DockerAPIEngine.host_config_value('Links', preprocess_value=_preprocess_links))
+
+OPTION_LOG_DRIVER_OPTIONS.add_engine('docker_api', DockerAPIEngine(
+ get_value=_get_values_log,
+ set_value=_set_values_log,
+))
+
+OPTION_MAC_ADDRESS.add_engine('docker_api', DockerAPIEngine.config_value('MacAddress'))
+
+OPTION_MEMORY.add_engine('docker_api', DockerAPIEngine.host_config_value('Memory', update_parameter='Memory'))
+
+OPTION_MEMORY_RESERVATION.add_engine('docker_api', DockerAPIEngine.host_config_value('MemoryReservation', update_parameter='MemoryReservation'))
+
+OPTION_MEMORY_SWAP.add_engine('docker_api', DockerAPIEngine.host_config_value('MemorySwap', update_parameter='MemorySwap'))
+
+OPTION_MEMORY_SWAPPINESS.add_engine('docker_api', DockerAPIEngine.host_config_value('MemorySwappiness'))
+
+OPTION_STOP_TIMEOUT.add_engine('docker_api', DockerAPIEngine.config_value('StopTimeout'))
+
+OPTION_NETWORK.add_engine('docker_api', DockerAPIEngine(
+ preprocess_value=_preprocess_network_values,
+ get_value=_get_values_network,
+ set_value=_set_values_network,
+ ignore_mismatching_result=_ignore_mismatching_network_result,
+))
+
+OPTION_OOM_KILLER.add_engine('docker_api', DockerAPIEngine.host_config_value('OomKillDisable'))
+
+OPTION_OOM_SCORE_ADJ.add_engine('docker_api', DockerAPIEngine.host_config_value('OomScoreAdj'))
+
+OPTION_PID_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('PidMode', preprocess_value=_preprocess_container_names))
+
+OPTION_PIDS_LIMIT.add_engine('docker_api', DockerAPIEngine.host_config_value('PidsLimit'))
+
+OPTION_PLATFORM.add_engine('docker_api', DockerAPIEngine(
+ get_value=_get_values_platform,
+ set_value=_set_values_platform,
+ min_api_version='1.41',
+))
+
+OPTION_PRIVILEGED.add_engine('docker_api', DockerAPIEngine.host_config_value('Privileged'))
+
+OPTION_READ_ONLY.add_engine('docker_api', DockerAPIEngine.host_config_value('ReadonlyRootfs'))
+
+OPTION_RESTART_POLICY.add_engine('docker_api', DockerAPIEngine(
+ get_value=_get_values_restart,
+ set_value=_set_values_restart,
+ update_value=_update_value_restart,
+))
+
+OPTION_RUNTIME.add_engine('docker_api', DockerAPIEngine.host_config_value('Runtime'))
+
+OPTION_SECURITY_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('SecurityOpt'))
+
+OPTION_SHM_SIZE.add_engine('docker_api', DockerAPIEngine.host_config_value('ShmSize'))
+
+OPTION_STOP_SIGNAL.add_engine('docker_api', DockerAPIEngine.config_value('StopSignal'))
+
+OPTION_STORAGE_OPTS.add_engine('docker_api', DockerAPIEngine.host_config_value('StorageOpt'))
+
+OPTION_SYSCTLS.add_engine('docker_api', DockerAPIEngine.host_config_value('Sysctls'))
+
+OPTION_TMPFS.add_engine('docker_api', DockerAPIEngine.host_config_value('Tmpfs'))
+
+OPTION_TTY.add_engine('docker_api', DockerAPIEngine.config_value('Tty'))
+
+OPTION_ULIMITS.add_engine('docker_api', DockerAPIEngine.host_config_value('Ulimits'))
+
+OPTION_USER.add_engine('docker_api', DockerAPIEngine.config_value('User'))
+
+OPTION_USERNS_MODE.add_engine('docker_api', DockerAPIEngine.host_config_value('UsernsMode'))
+
+OPTION_UTS.add_engine('docker_api', DockerAPIEngine.host_config_value('UTSMode'))
+
+OPTION_VOLUME_DRIVER.add_engine('docker_api', DockerAPIEngine.host_config_value('VolumeDriver'))
+
+OPTION_VOLUMES_FROM.add_engine('docker_api', DockerAPIEngine.host_config_value('VolumesFrom'))
+
+OPTION_WORKING_DIR.add_engine('docker_api', DockerAPIEngine.config_value('WorkingDir'))
+
+OPTION_MOUNTS_VOLUMES.add_engine('docker_api', DockerAPIEngine(
+ get_value=_get_values_mounts,
+ get_expected_values=_get_expected_values_mounts,
+ set_value=_set_values_mounts,
+))
+
+OPTION_PORTS.add_engine('docker_api', DockerAPIEngine(
+ get_value=_get_values_ports,
+ get_expected_values=_get_expected_values_ports,
+ set_value=_set_values_ports,
+ preprocess_value=_preprocess_value_ports,
+))
diff --git a/ansible_collections/community/docker/plugins/module_utils/module_container/module.py b/ansible_collections/community/docker/plugins/module_utils/module_container/module.py
new file mode 100644
index 000000000..230dbfb40
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/module_container/module.py
@@ -0,0 +1,843 @@
+# Copyright (c) 2022 Felix Fontein <felix@fontein.de>
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+import re
+from time import sleep
+
+from ansible.module_utils.common.text.converters import to_native, to_text
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DifferenceTracker,
+ DockerBaseClass,
+ compare_generic,
+ is_image_name_id,
+ sanitize_result,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag
+
+
+class Container(DockerBaseClass):
+ def __init__(self, container, engine_driver):
+ super(Container, self).__init__()
+ self.raw = container
+ self.id = None
+ self.image = None
+ self.image_name = None
+ self.container = container
+ self.engine_driver = engine_driver
+ if container:
+ self.id = engine_driver.get_container_id(container)
+ self.image = engine_driver.get_image_from_container(container)
+ self.image_name = engine_driver.get_image_name_from_container(container)
+ self.log(self.container, pretty_print=True)
+
+ @property
+ def exists(self):
+        return bool(self.container)
+
+ @property
+ def removing(self):
+ return self.engine_driver.is_container_removing(self.container) if self.container else False
+
+ @property
+ def running(self):
+ return self.engine_driver.is_container_running(self.container) if self.container else False
+
+ @property
+ def paused(self):
+ return self.engine_driver.is_container_paused(self.container) if self.container else False
+
+
+class ContainerManager(DockerBaseClass):
+ def __init__(self, module, engine_driver, client, active_options):
+ self.module = module
+ self.engine_driver = engine_driver
+ self.client = client
+ self.options = active_options
+ self.all_options = self._collect_all_options(active_options)
+ self.check_mode = self.module.check_mode
+ self.param_cleanup = self.module.params['cleanup']
+ self.param_container_default_behavior = self.module.params['container_default_behavior']
+ self.param_default_host_ip = self.module.params['default_host_ip']
+ self.param_debug = self.module.params['debug']
+ self.param_force_kill = self.module.params['force_kill']
+ self.param_image = self.module.params['image']
+ self.param_image_comparison = self.module.params['image_comparison']
+ self.param_image_label_mismatch = self.module.params['image_label_mismatch']
+ self.param_image_name_mismatch = self.module.params['image_name_mismatch']
+ self.param_keep_volumes = self.module.params['keep_volumes']
+ self.param_kill_signal = self.module.params['kill_signal']
+ self.param_name = self.module.params['name']
+ self.param_networks_cli_compatible = self.module.params['networks_cli_compatible']
+ self.param_output_logs = self.module.params['output_logs']
+ self.param_paused = self.module.params['paused']
+ self.param_pull = self.module.params['pull']
+ self.param_recreate = self.module.params['recreate']
+ self.param_removal_wait_timeout = self.module.params['removal_wait_timeout']
+ self.param_restart = self.module.params['restart']
+ self.param_state = self.module.params['state']
+ self._parse_comparisons()
+ self._update_params()
+ self.results = {'changed': False, 'actions': []}
+ self.diff = {}
+ self.diff_tracker = DifferenceTracker()
+ self.facts = {}
+ if self.param_default_host_ip:
+ valid_ip = False
+ if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.param_default_host_ip):
+ valid_ip = True
+ if re.match(r'^\[[0-9a-fA-F:]+\]$', self.param_default_host_ip):
+ valid_ip = True
+ if re.match(r'^[0-9a-fA-F:]+$', self.param_default_host_ip):
+ self.param_default_host_ip = '[{0}]'.format(self.param_default_host_ip)
+ valid_ip = True
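+                # For example, a bare IPv6 address such as '::1' is rewritten to
+                # '[::1]' here, so it can be used directly in port bindings.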
+ if not valid_ip:
+ self.fail('The value of default_host_ip must be an empty string, an IPv4 address, '
+ 'or an IPv6 address. Got "{0}" instead.'.format(self.param_default_host_ip))
+
+ def _collect_all_options(self, active_options):
+ all_options = {}
+ for options in active_options:
+ for option in options.options:
+ all_options[option.name] = option
+ return all_options
+
+ def _collect_all_module_params(self):
+ all_module_options = set()
+ for option, data in self.module.argument_spec.items():
+ all_module_options.add(option)
+ if 'aliases' in data:
+ for alias in data['aliases']:
+ all_module_options.add(alias)
+ return all_module_options
+
+ def _parse_comparisons(self):
+ # Keep track of all module params and all option aliases
+ all_module_options = self._collect_all_module_params()
+ comp_aliases = {}
+ for option_name, option in self.all_options.items():
+ if option.not_an_ansible_option:
+ continue
+ comp_aliases[option_name] = option_name
+ for alias in option.ansible_aliases:
+ comp_aliases[alias] = option_name
+ # Process legacy ignore options
+ if self.module.params['ignore_image']:
+ self.all_options['image'].comparison = 'ignore'
+ if self.module.params['purge_networks']:
+ self.all_options['networks'].comparison = 'strict'
+        # Process comparisons specified by the user
+ if self.module.params.get('comparisons'):
+ # If '*' appears in comparisons, process it first
+ if '*' in self.module.params['comparisons']:
+ value = self.module.params['comparisons']['*']
+ if value not in ('strict', 'ignore'):
+ self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
+ for option in self.all_options.values():
+ if option.name == 'networks':
+ # `networks` is special: only update if
+ # some value is actually specified
+ if self.module.params['networks'] is None:
+ continue
+ option.comparison = value
+ # Now process all other comparisons.
+ comp_aliases_used = {}
+ for key, value in self.module.params['comparisons'].items():
+ if key == '*':
+ continue
+ # Find main key
+ key_main = comp_aliases.get(key)
+ if key_main is None:
+                if key in all_module_options:
+ self.fail("The module option '%s' cannot be specified in the comparisons dict, "
+ "since it does not correspond to container's state!" % key)
+ if key not in self.all_options or self.all_options[key].not_an_ansible_option:
+ self.fail("Unknown module option '%s' in comparisons dict!" % key)
+ key_main = key
+ if key_main in comp_aliases_used:
+ self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
+ comp_aliases_used[key_main] = key
+ # Check value and update accordingly
+ if value in ('strict', 'ignore'):
+ self.all_options[key_main].comparison = value
+ elif value == 'allow_more_present':
+ if self.all_options[key_main].comparison_type == 'value':
+ self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
+ self.all_options[key_main].comparison = value
+ else:
+ self.fail("Unknown comparison mode '%s'!" % value)
+ # Copy values
+ for option in self.all_options.values():
+ if option.copy_comparison_from is not None:
+ option.comparison = self.all_options[option.copy_comparison_from].comparison
+ # Check legacy values
+ if self.module.params['ignore_image'] and self.all_options['image'].comparison != 'ignore':
+ self.module.warn('The ignore_image option has been overridden by the comparisons option!')
+ if self.module.params['purge_networks'] and self.all_options['networks'].comparison != 'strict':
+ self.module.warn('The purge_networks option has been overridden by the comparisons option!')
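+        # Illustrative comparisons input (hypothetical): {'*': 'ignore',
+        # 'env': 'strict', 'published_ports': 'allow_more_present'} ignores all
+        # differences by default, but requires the environment to match exactly
+        # and allows additional published ports beyond those specified.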
+
+ def _update_params(self):
+ if self.param_networks_cli_compatible is True and self.module.params['networks'] and self.module.params['network_mode'] is None:
+ # Same behavior as Docker CLI: if networks are specified, use the name of the first network as the value for network_mode
+ # (assuming no explicit value is specified for network_mode)
+ self.module.params['network_mode'] = self.module.params['networks'][0]['name']
+ if self.param_container_default_behavior == 'compatibility':
+ old_default_values = dict(
+ auto_remove=False,
+ detach=True,
+ init=False,
+ interactive=False,
+ memory='0',
+ paused=False,
+ privileged=False,
+ read_only=False,
+ tty=False,
+ )
+ for param, value in old_default_values.items():
+ if self.module.params[param] is None:
+ self.module.params[param] = value
+
+ def fail(self, *args, **kwargs):
+ self.client.fail(*args, **kwargs)
+
+ def run(self):
+ if self.param_state in ('stopped', 'started', 'present'):
+ self.present(self.param_state)
+ elif self.param_state == 'absent':
+ self.absent()
+
+ if not self.check_mode and not self.param_debug:
+ self.results.pop('actions')
+
+ if self.module._diff or self.param_debug:
+ self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff
+
+ if self.facts:
+ self.results['container'] = self.facts
+
+ def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None):
+ delay = 1.0
+ total_wait = 0
+ while True:
+ # Inspect container
+ result = self.engine_driver.inspect_container_by_id(self.client, container_id)
+ if result is None:
+ if accept_removal:
+ return
+                msg = 'Encountered vanished container while waiting for container "{0}"'
+ self.fail(msg.format(container_id))
+ # Check container state
+ state = result.get('State', {}).get('Status')
+ if complete_states is not None and state in complete_states:
+ return
+ if wait_states is not None and state not in wait_states:
+                msg = 'Encountered unexpected state "{1}" while waiting for container "{0}"'
+ self.fail(msg.format(container_id, state))
+ # Wait
+ if max_wait is not None:
+ if total_wait > max_wait:
+ msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
+ self.fail(msg.format(container_id, max_wait))
+ if total_wait + delay > max_wait:
+ delay = max_wait - total_wait
+ sleep(delay)
+ total_wait += delay
+ # Exponential backoff, but never wait longer than 10 seconds
+ # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
+ # until the maximal 10 seconds delay is reached. By then, the
+ # code will have slept for ~1.5 minutes.)
+ delay = min(delay * 1.1, 10)
+
+ def _collect_params(self, active_options):
+ parameters = []
+ for options in active_options:
+ values = {}
+ engine = options.get_engine(self.engine_driver.name)
+ for option in options.all_options:
+ if not option.not_an_ansible_option and self.module.params[option.name] is not None:
+ values[option.name] = self.module.params[option.name]
+ values = options.preprocess(self.module, values)
+ engine.preprocess_value(self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, values)
+ parameters.append((options, values))
+ return parameters
+
+ def present(self, state):
+ self.parameters = self._collect_params(self.options)
+ container = self._get_container(self.param_name)
+ was_running = container.running
+ was_paused = container.paused
+ container_created = False
+
+ # If the image parameter was passed then we need to deal with the image
+ # version comparison. Otherwise we handle this depending on whether
+ # the container already runs or not; in the former case, in case the
+ # container needs to be restarted, we use the existing container's
+ # image ID.
+ image, comparison_image = self._get_image(container)
+ self.log(image, pretty_print=True)
+ if not container.exists or container.removing:
+ # New container
+ if container.removing:
+ self.log('Found container in removal phase')
+ else:
+ self.log('No container found')
+ if not self.param_image:
+ self.fail('Cannot create container when image is not specified!')
+ self.diff_tracker.add('exists', parameter=True, active=False)
+ if container.removing and not self.check_mode:
+ # Wait for container to be removed before trying to create it
+ self.wait_for_state(
+ container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout)
+ new_container = self.container_create(self.param_image)
+ if new_container:
+ container = new_container
+ container_created = True
+ else:
+ # Existing container
+ different, differences = self.has_different_configuration(container, comparison_image)
+ image_different = False
+ if self.all_options['image'].comparison == 'strict':
+ image_different = self._image_is_different(image, container)
+ if self.param_image_name_mismatch == 'recreate' and self.param_image is not None and self.param_image != container.image_name:
+ different = True
+ self.diff_tracker.add('image_name', parameter=self.param_image, active=container.image_name)
+ if image_different or different or self.param_recreate:
+ self.diff_tracker.merge(differences)
+ self.diff['differences'] = differences.get_legacy_docker_container_diffs()
+ if image_different:
+ self.diff['image_different'] = True
+ self.log("differences")
+ self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
+ image_to_use = self.param_image
+ if not image_to_use and container and container.image:
+ image_to_use = container.image
+ if not image_to_use:
+ self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
+ if container.running:
+ self.container_stop(container.id)
+ self.container_remove(container.id)
+ if not self.check_mode:
+ self.wait_for_state(
+ container.id, wait_states=['removing'], accept_removal=True, max_wait=self.param_removal_wait_timeout)
+ new_container = self.container_create(image_to_use)
+ if new_container:
+ container = new_container
+ container_created = True
+ comparison_image = image
+
+ if container and container.exists:
+ container = self.update_limits(container, comparison_image)
+ container = self.update_networks(container, container_created)
+
+ if state == 'started' and not container.running:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ container = self.container_start(container.id)
+ elif state == 'started' and self.param_restart:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ self.diff_tracker.add('restarted', parameter=True, active=False)
+ container = self.container_restart(container.id)
+ elif state == 'stopped' and container.running:
+ self.diff_tracker.add('running', parameter=False, active=was_running)
+ self.container_stop(container.id)
+ container = self._get_container(container.id)
+
+ if state == 'started' and self.param_paused is not None and container.paused != self.param_paused:
+ self.diff_tracker.add('paused', parameter=self.param_paused, active=was_paused)
+ if not self.check_mode:
+ try:
+ if self.param_paused:
+ self.engine_driver.pause_container(self.client, container.id)
+ else:
+ self.engine_driver.unpause_container(self.client, container.id)
+ except Exception as exc:
+ self.fail("Error %s container %s: %s" % (
+ "pausing" if self.param_paused else "unpausing", container.id, to_native(exc)
+ ))
+ container = self._get_container(container.id)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(set_paused=self.param_paused))
+
+ self.facts = container.raw
+
+ def absent(self):
+ container = self._get_container(self.param_name)
+ if container.exists:
+ if container.running:
+ self.diff_tracker.add('running', parameter=False, active=True)
+ self.container_stop(container.id)
+ self.diff_tracker.add('exists', parameter=False, active=True)
+ self.container_remove(container.id)
+
+ def _output_logs(self, msg):
+ self.module.log(msg=msg)
+
+ def _get_container(self, container):
+ '''
+ Expects container ID or Name. Returns a container object
+ '''
+ container = self.engine_driver.inspect_container_by_name(self.client, container)
+ return Container(container, self.engine_driver)
+
+ def _get_container_image(self, container, fallback=None):
+ if not container.exists or container.removing:
+ return fallback
+ image = container.image
+ if is_image_name_id(image):
+ image = self.engine_driver.inspect_image_by_id(self.client, image)
+ else:
+ repository, tag = parse_repository_tag(image)
+ if not tag:
+ tag = "latest"
+ image = self.engine_driver.inspect_image_by_name(self.client, repository, tag)
+ return image or fallback
+
+ def _get_image(self, container):
+ image_parameter = self.param_image
+ if not image_parameter:
+ self.log('No image specified')
+ return None, self._get_container_image(container)
+ if is_image_name_id(image_parameter):
+ image = self.engine_driver.inspect_image_by_id(self.client, image_parameter)
+ else:
+ repository, tag = parse_repository_tag(image_parameter)
+ if not tag:
+ tag = "latest"
+ image = self.engine_driver.inspect_image_by_name(self.client, repository, tag)
+ if not image or self.param_pull:
+ if not self.check_mode:
+ self.log("Pull the image.")
+                image, already_latest = self.engine_driver.pull_image(
+                    self.client, repository, tag, platform=self.module.params['platform'])
+                if already_latest:
+ self.results['changed'] = False
+ else:
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+ elif not image:
+ # If the image isn't there, claim we'll pull.
+ # (Implicitly: if the image is there, claim it already was latest.)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+
+ self.log("image")
+ self.log(image, pretty_print=True)
+
+ comparison_image = image
+ if self.param_image_comparison == 'current-image':
+ comparison_image = self._get_container_image(container, image)
+ if comparison_image != image:
+ self.log("current image")
+ self.log(comparison_image, pretty_print=True)
+
+ return image, comparison_image
+
+ def _image_is_different(self, image, container):
+ if image and image.get('Id'):
+ if container and container.image:
+ if image.get('Id') != container.image:
+ self.diff_tracker.add('image', parameter=image.get('Id'), active=container.image)
+ return True
+ return False
+
+ def _compose_create_parameters(self, image):
+ params = {}
+ for options, values in self.parameters:
+ engine = options.get_engine(self.engine_driver.name)
+ if engine.can_set_value(self.engine_driver.get_api_version(self.client)):
+ engine.set_value(self.module, params, self.engine_driver.get_api_version(self.client), options.options, values)
+ params['Image'] = image
+ return params
+
+ def _record_differences(self, differences, options, param_values, engine, container, image):
+ container_values = engine.get_value(self.module, container.raw, self.engine_driver.get_api_version(self.client), options.options)
+ expected_values = engine.get_expected_values(
+ self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, image, param_values.copy())
+ for option in options.options:
+ if option.name in expected_values:
+ param_value = expected_values[option.name]
+ container_value = container_values.get(option.name)
+ match = compare_generic(param_value, container_value, option.comparison, option.comparison_type)
+
+ if not match:
+ # No match.
+ if engine.ignore_mismatching_result(self.module, self.client, self.engine_driver.get_api_version(self.client),
+ option, image, container_value, param_value):
+ # Ignore the result
+ continue
+
+ # Record the differences
+ p = param_value
+ c = container_value
+ if option.comparison_type == 'set':
+ # Since the order does not matter, sort so that the diff output is better.
+ if p is not None:
+ p = sorted(p)
+ if c is not None:
+ c = sorted(c)
+ elif option.comparison_type == 'set(dict)':
+ # Since the order does not matter, sort so that the diff output is better.
+ if option.name == 'expected_mounts':
+ # For selected values, use one entry as key
+ def sort_key_fn(x):
+ return x['target']
+ else:
+ # We sort the list of dictionaries by using the sorted items of a dict as its key.
+ def sort_key_fn(x):
+ return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items())
+ if p is not None:
+ p = sorted(p, key=sort_key_fn)
+ if c is not None:
+ c = sorted(c, key=sort_key_fn)
+ differences.add(option.name, parameter=p, active=c)
+
+ def has_different_configuration(self, container, image):
+ differences = DifferenceTracker()
+ update_differences = DifferenceTracker()
+ for options, param_values in self.parameters:
+ engine = options.get_engine(self.engine_driver.name)
+ if engine.can_update_value(self.engine_driver.get_api_version(self.client)):
+ self._record_differences(update_differences, options, param_values, engine, container, image)
+ else:
+ self._record_differences(differences, options, param_values, engine, container, image)
+ has_differences = not differences.empty
+ # Only consider differences of properties that can be updated when there are also other differences
+ if has_differences:
+ differences.merge(update_differences)
+ return has_differences, differences
+
+ def has_different_resource_limits(self, container, image):
+ differences = DifferenceTracker()
+ for options, param_values in self.parameters:
+ engine = options.get_engine(self.engine_driver.name)
+ if not engine.can_update_value(self.engine_driver.get_api_version(self.client)):
+ continue
+ self._record_differences(differences, options, param_values, engine, container, image)
+ has_differences = not differences.empty
+ return has_differences, differences
+
+ def _compose_update_parameters(self):
+ result = {}
+ for options, values in self.parameters:
+ engine = options.get_engine(self.engine_driver.name)
+ if not engine.can_update_value(self.engine_driver.get_api_version(self.client)):
+ continue
+ engine.update_value(self.module, result, self.engine_driver.get_api_version(self.client), options.options, values)
+ return result
+
+ def update_limits(self, container, image):
+ limits_differ, different_limits = self.has_different_resource_limits(container, image)
+ if limits_differ:
+ self.log("limit differences:")
+ self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
+ self.diff_tracker.merge(different_limits)
+ if limits_differ and not self.check_mode:
+ self.container_update(container.id, self._compose_update_parameters())
+ return self._get_container(container.id)
+ return container
+
+ def has_network_differences(self, container):
+ '''
+ Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
+ '''
+ different = False
+ differences = []
+
+ if not self.module.params['networks']:
+ return different, differences
+
+ if not container.container.get('NetworkSettings'):
+ self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = container.container['NetworkSettings']['Networks']
+ for network in self.module.params['networks']:
+ network_info = connected_networks.get(network['name'])
+ if network_info is None:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=None
+ ))
+ else:
+ diff = False
+ network_info_ipam = network_info.get('IPAMConfig') or {}
+ if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'):
+ diff = True
+ if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'):
+ diff = True
+ if network.get('aliases'):
+ if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
+ diff = True
+ if network.get('links'):
+ expected_links = []
+ for link, alias in network['links']:
+ expected_links.append("%s:%s" % (link, alias))
+ if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
+ diff = True
+ if diff:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=dict(
+ name=network['name'],
+ ipv4_address=network_info_ipam.get('IPv4Address'),
+ ipv6_address=network_info_ipam.get('IPv6Address'),
+ aliases=network_info.get('Aliases'),
+ links=network_info.get('Links')
+ )
+ ))
+ return different, differences
+
+ def has_extra_networks(self, container):
+ '''
+ Check if the container is connected to non-requested networks
+ '''
+ extra_networks = []
+ extra = False
+
+ if not container.container.get('NetworkSettings'):
+ self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = container.container['NetworkSettings'].get('Networks')
+ if connected_networks:
+ for network, network_config in connected_networks.items():
+ keep = False
+ if self.module.params['networks']:
+ for expected_network in self.module.params['networks']:
+ if expected_network['name'] == network:
+ keep = True
+ if not keep:
+ extra = True
+ extra_networks.append(dict(name=network, id=network_config['NetworkID']))
+ return extra, extra_networks
+
+ def update_networks(self, container, container_created):
+ updated_container = container
+ if self.all_options['networks'].comparison != 'ignore' or container_created:
+ has_network_differences, network_differences = self.has_network_differences(container)
+ if has_network_differences:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(network_differences=network_differences))
+ else:
+ self.diff['differences'] = [dict(network_differences=network_differences)]
+ for netdiff in network_differences:
+ self.diff_tracker.add(
+ 'network.{0}'.format(netdiff['parameter']['name']),
+ parameter=netdiff['parameter'],
+ active=netdiff['container']
+ )
+ self.results['changed'] = True
+ updated_container = self._add_networks(container, network_differences)
+
+ purge_networks = self.all_options['networks'].comparison == 'strict' and self.module.params['networks'] is not None
+ if not purge_networks and self.module.params['purge_networks']:
+ purge_networks = True
+ self.module.deprecate(
+ 'The purge_networks option is used while networks is not specified. In this case purge_networks=true cannot'
+ ' be replaced by `networks: strict` in comparisons, which is necessary once purge_networks is removed.'
+ ' Please modify the docker_container invocation by adding `networks: []`',
+ version='4.0.0', collection_name='community.docker')
+ if purge_networks:
+ has_extra_networks, extra_networks = self.has_extra_networks(container)
+ if has_extra_networks:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(purge_networks=extra_networks))
+ else:
+ self.diff['differences'] = [dict(purge_networks=extra_networks)]
+ for extra_network in extra_networks:
+ self.diff_tracker.add(
+ 'network.{0}'.format(extra_network['name']),
+ active=extra_network
+ )
+ self.results['changed'] = True
+ updated_container = self._purge_networks(container, extra_networks)
+ return updated_container
+
+ def _add_networks(self, container, differences):
+ for diff in differences:
+ # remove the container from the network, if connected
+ if diff.get('container'):
+ self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
+ if not self.check_mode:
+ try:
+ self.engine_driver.disconnect_container_from_network(self.client, container.id, diff['parameter']['id'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
+ to_native(exc)))
+ # connect to the network
+ self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=diff['parameter']))
+ if not self.check_mode:
+ params = {key: value for key, value in diff['parameter'].items() if key not in ('id', 'name')}
+ try:
+ self.log("Connecting container to network %s" % diff['parameter']['id'])
+ self.log(params, pretty_print=True)
+ self.engine_driver.connect_container_to_network(self.client, container.id, diff['parameter']['id'], params)
+ except Exception as exc:
+ self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc)))
+ return self._get_container(container.id)
+
+ def _purge_networks(self, container, networks):
+ for network in networks:
+ self.results['actions'].append(dict(removed_from_network=network['name']))
+ if not self.check_mode:
+ try:
+ self.engine_driver.disconnect_container_from_network(self.client, container.id, network['name'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (network['name'],
+ to_native(exc)))
+ return self._get_container(container.id)
+
+ def container_create(self, image):
+ create_parameters = self._compose_create_parameters(image)
+ self.log("create container")
+ self.log("image: %s parameters:" % image)
+ self.log(create_parameters, pretty_print=True)
+ self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
+ self.results['changed'] = True
+ new_container = None
+ if not self.check_mode:
+ try:
+ container_id = self.engine_driver.create_container(self.client, self.param_name, create_parameters)
+ except Exception as exc:
+ self.fail("Error creating container: %s" % to_native(exc))
+ return self._get_container(container_id)
+ return new_container
+
+ def container_start(self, container_id):
+ self.log("start container %s" % (container_id))
+ self.results['actions'].append(dict(started=container_id))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.engine_driver.start_container(self.client, container_id)
+ except Exception as exc:
+ self.fail("Error starting container %s: %s" % (container_id, to_native(exc)))
+
+ if self.module.params['detach'] is False:
+ status = self.engine_driver.wait_for_container(self.client, container_id)
+ self.client.fail_results['status'] = status
+ self.results['status'] = status
+
+ if self.module.params['auto_remove']:
+ output = "Cannot retrieve result as auto_remove is enabled"
+ if self.param_output_logs:
+ self.module.warn('Cannot output_logs if auto_remove is enabled!')
+ else:
+ output, real_output = self.engine_driver.get_container_output(self.client, container_id)
+ if real_output and self.param_output_logs:
+ self._output_logs(msg=output)
+
+ if self.param_cleanup:
+ self.container_remove(container_id, force=True)
+ insp = self._get_container(container_id)
+ if insp.raw:
+ insp.raw['Output'] = output
+ else:
+ insp.raw = dict(Output=output)
+ if status != 0:
+ # Set `failed` to True and return output as msg
+ self.results['failed'] = True
+ self.results['msg'] = output
+ return insp
+ return self._get_container(container_id)
+
+ def container_remove(self, container_id, link=False, force=False):
+ volume_state = (not self.param_keep_volumes)
+        self.log("remove container container:%s v:%s link:%s force:%s" % (container_id, volume_state, link, force))
+ self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.engine_driver.remove_container(self.client, container_id, remove_volumes=volume_state, link=link, force=force)
+ except Exception as exc:
+ self.client.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+
+ def container_update(self, container_id, update_parameters):
+ if update_parameters:
+ self.log("update container %s" % (container_id))
+ self.log(update_parameters, pretty_print=True)
+ self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.engine_driver.update_container(self.client, container_id, update_parameters)
+ except Exception as exc:
+ self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_kill(self, container_id):
+ self.results['actions'].append(dict(killed=container_id, signal=self.param_kill_signal))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.engine_driver.kill_container(self.client, container_id, kill_signal=self.param_kill_signal)
+ except Exception as exc:
+ self.fail("Error killing container %s: %s" % (container_id, to_native(exc)))
+
+ def container_restart(self, container_id):
+ self.results['actions'].append(dict(restarted=container_id, timeout=self.module.params['stop_timeout']))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.engine_driver.restart_container(self.client, container_id, self.module.params['stop_timeout'] or 10)
+ except Exception as exc:
+ self.fail("Error restarting container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_stop(self, container_id):
+ if self.param_force_kill:
+ self.container_kill(container_id)
+ return
+ self.results['actions'].append(dict(stopped=container_id, timeout=self.module.params['stop_timeout']))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.engine_driver.stop_container(self.client, container_id, self.module.params['stop_timeout'])
+ except Exception as exc:
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+
+
+def run_module(engine_driver):
+ module, active_options, client = engine_driver.setup(
+ argument_spec=dict(
+ cleanup=dict(type='bool', default=False),
+ comparisons=dict(type='dict'),
+ container_default_behavior=dict(type='str', default='no_defaults', choices=['compatibility', 'no_defaults']),
+ command_handling=dict(type='str', choices=['compatibility', 'correct'], default='correct'),
+ default_host_ip=dict(type='str'),
+ force_kill=dict(type='bool', default=False, aliases=['forcekill']),
+ ignore_image=dict(type='bool', default=False, removed_in_version='4.0.0', removed_from_collection='community.docker'),
+ image=dict(type='str'),
+ image_comparison=dict(type='str', choices=['desired-image', 'current-image'], default='desired-image'),
+ image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'),
+ image_name_mismatch=dict(type='str', choices=['ignore', 'recreate'], default='ignore'),
+ keep_volumes=dict(type='bool', default=True),
+ kill_signal=dict(type='str'),
+ name=dict(type='str', required=True),
+ networks_cli_compatible=dict(type='bool', default=True),
+ output_logs=dict(type='bool', default=False),
+ paused=dict(type='bool'),
+ pull=dict(type='bool', default=False),
+ purge_networks=dict(type='bool', default=False, removed_in_version='4.0.0', removed_from_collection='community.docker'),
+ recreate=dict(type='bool', default=False),
+ removal_wait_timeout=dict(type='float'),
+ restart=dict(type='bool', default=False),
+ state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']),
+ ),
+ required_if=[
+ ('state', 'present', ['image'])
+ ],
+ )
+
+ def execute():
+ cm = ContainerManager(module, engine_driver, client, active_options)
+ cm.run()
+ module.exit_json(**sanitize_result(cm.results))
+
+ engine_driver.run(execute, client)
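+
+
+# Illustrative sketch of how a module wires this up. The driver class name is
+# taken from this collection's docker_api engine driver; treat the exact import
+# path as an assumption, not a guarantee:
+#
+#   from ansible_collections.community.docker.plugins.module_utils.module_container.docker_api import (
+#       DockerAPIEngineDriver,
+#   )
+#
+#   def main():
+#       run_module(DockerAPIEngineDriver())
+#
+#   if __name__ == '__main__':
+#       main()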
diff --git a/ansible_collections/community/docker/plugins/module_utils/socket_handler.py b/ansible_collections/community/docker/plugins/module_utils/socket_handler.py
new file mode 100644
index 000000000..878dc7c53
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/socket_handler.py
@@ -0,0 +1,206 @@
+# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import os
+import os.path
+import socket as pysocket
+import struct
+
+from ansible.module_utils.six import PY2
+
+from ansible_collections.community.docker.plugins.module_utils._api.utils import socket as docker_socket
+
+from ansible_collections.community.docker.plugins.module_utils.socket_helper import (
+ make_unblocking,
+ shutdown_writing,
+ write_to_socket,
+)
+
+
+PARAMIKO_POLL_TIMEOUT = 0.01 # 10 milliseconds
+
+
+class DockerSocketHandlerBase(object):
+ def __init__(self, sock, selectors, log=None):
+ make_unblocking(sock)
+
+ self._selectors = selectors
+ if log is not None:
+ self._log = log
+ else:
+ self._log = lambda msg: True
+ self._paramiko_read_workaround = hasattr(sock, 'send_ready') and 'paramiko' in str(type(sock))
+
+ self._sock = sock
+ self._block_done_callback = None
+ self._block_buffer = []
+ self._eof = False
+ self._read_buffer = b''
+ self._write_buffer = b''
+ self._end_of_writing = False
+
+ self._current_stream = None
+ self._current_missing = 0
+ self._current_buffer = b''
+
+ self._selector = self._selectors.DefaultSelector()
+ self._selector.register(self._sock, self._selectors.EVENT_READ)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ self._selector.close()
+
+ def set_block_done_callback(self, block_done_callback):
+ self._block_done_callback = block_done_callback
+ if self._block_done_callback is not None:
+ while self._block_buffer:
+                elt = self._block_buffer.pop(0)
+ self._block_done_callback(*elt)
+
+ def _add_block(self, stream_id, data):
+ if self._block_done_callback is not None:
+ self._block_done_callback(stream_id, data)
+ else:
+ self._block_buffer.append((stream_id, data))
+
+ def _read(self):
+ if self._eof:
+ return
+ if hasattr(self._sock, 'recv'):
+ try:
+ data = self._sock.recv(262144)
+ except Exception as e:
+ # After calling self._sock.shutdown(), OpenSSL's/urllib3's
+ # WrappedSocket seems to eventually raise ZeroReturnError in
+ # case of EOF
+ if 'OpenSSL.SSL.ZeroReturnError' in str(type(e)):
+ self._eof = True
+ return
+ else:
+ raise
+ elif not PY2 and isinstance(self._sock, getattr(pysocket, 'SocketIO')):
+ data = self._sock.read()
+ else:
+            data = os.read(self._sock.fileno(), 262144)
+ if data is None:
+ # no data available
+ return
+ self._log('read {0} bytes'.format(len(data)))
+ if len(data) == 0:
+ # Stream EOF
+ self._eof = True
+ return
+ self._read_buffer += data
+ while len(self._read_buffer) > 0:
+ if self._current_missing > 0:
+ n = min(len(self._read_buffer), self._current_missing)
+ self._current_buffer += self._read_buffer[:n]
+ self._read_buffer = self._read_buffer[n:]
+ self._current_missing -= n
+ if self._current_missing == 0:
+ self._add_block(self._current_stream, self._current_buffer)
+ self._current_buffer = b''
+ if len(self._read_buffer) < 8:
+ break
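+            # The Docker daemon multiplexes stdout/stderr over one connection using
+            # 8-byte frame headers: one stream-ID byte, three padding bytes, and a
+            # big-endian unsigned 32-bit payload length (hence '>BxxxL').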
+ self._current_stream, self._current_missing = struct.unpack('>BxxxL', self._read_buffer[:8])
+ self._read_buffer = self._read_buffer[8:]
+ if self._current_missing < 0:
+ # Stream EOF (as reported by docker daemon)
+ self._eof = True
+ break
+
+ def _handle_end_of_writing(self):
+ if self._end_of_writing and len(self._write_buffer) == 0:
+ self._end_of_writing = False
+ self._log('Shutting socket down for writing')
+ shutdown_writing(self._sock, self._log)
+
+ def _write(self):
+ if len(self._write_buffer) > 0:
+ written = write_to_socket(self._sock, self._write_buffer)
+ self._write_buffer = self._write_buffer[written:]
+ self._log('wrote {0} bytes, {1} are left'.format(written, len(self._write_buffer)))
+ if len(self._write_buffer) > 0:
+ self._selector.modify(self._sock, self._selectors.EVENT_READ | self._selectors.EVENT_WRITE)
+ else:
+ self._selector.modify(self._sock, self._selectors.EVENT_READ)
+ self._handle_end_of_writing()
+
+ def select(self, timeout=None, _internal_recursion=False):
+ if not _internal_recursion and self._paramiko_read_workaround and len(self._write_buffer) > 0:
+ # When the SSH transport is used, Docker SDK for Python internally uses Paramiko, whose
+ # Channel object supports select(), but only for reading
+ # (https://github.com/paramiko/paramiko/issues/695).
+ if self._sock.send_ready():
+ self._write()
+ return True
+ while timeout is None or timeout > PARAMIKO_POLL_TIMEOUT:
+ result = self.select(PARAMIKO_POLL_TIMEOUT, _internal_recursion=True)
+ if self._sock.send_ready():
+                    self._write()
+ result += 1
+ if result > 0:
+ return True
+ if timeout is not None:
+ timeout -= PARAMIKO_POLL_TIMEOUT
+ self._log('select... ({0})'.format(timeout))
+ events = self._selector.select(timeout)
+ for key, event in events:
+ if key.fileobj == self._sock:
+ self._log(
+ 'select event read:{0} write:{1}'.format(
+ event & self._selectors.EVENT_READ != 0,
+ event & self._selectors.EVENT_WRITE != 0))
+ if event & self._selectors.EVENT_READ != 0:
+ self._read()
+ if event & self._selectors.EVENT_WRITE != 0:
+ self._write()
+ result = len(events)
+ if self._paramiko_read_workaround and len(self._write_buffer) > 0:
+ if self._sock.send_ready():
+ self._write()
+ result += 1
+ return result > 0
+
+ def is_eof(self):
+ return self._eof
+
+ def end_of_writing(self):
+ self._end_of_writing = True
+ self._handle_end_of_writing()
+
+ def consume(self):
+ stdout = []
+ stderr = []
+
+ def append_block(stream_id, data):
+ if stream_id == docker_socket.STDOUT:
+ stdout.append(data)
+ elif stream_id == docker_socket.STDERR:
+ stderr.append(data)
+ else:
+ raise ValueError('{0} is not a valid stream ID'.format(stream_id))
+
+ self.end_of_writing()
+
+ self.set_block_done_callback(append_block)
+ while not self._eof:
+ self.select()
+ return b''.join(stdout), b''.join(stderr)
+
+    def write(self, data):
+        self._write_buffer += data
+        if len(self._write_buffer) == len(data):
+ self._write()
+
+
+class DockerSocketHandlerModule(DockerSocketHandlerBase):
+ def __init__(self, sock, module, selectors):
+ super(DockerSocketHandlerModule, self).__init__(sock, selectors, module.debug)
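+
+
+# Illustrative usage sketch (assumptions: `sock` is a socket attached to a
+# container's stdio, and `selectors` is the stdlib selectors module passed in
+# by the caller):
+#
+#   import selectors
+#
+#   with DockerSocketHandlerBase(sock, selectors) as handler:
+#       handler.write(b'some stdin data\n')
+#       stdout, stderr = handler.consume()  # waits for EOF, demultiplexes streams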
diff --git a/ansible_collections/community/docker/plugins/module_utils/socket_helper.py b/ansible_collections/community/docker/plugins/module_utils/socket_helper.py
new file mode 100644
index 000000000..2148fe97d
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/socket_helper.py
@@ -0,0 +1,66 @@
+# Copyright (c) 2019-2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import fcntl
+import os
+import os.path
+import socket as pysocket
+
+from ansible.module_utils.six import PY2
+
+
+def make_file_unblocking(file):
+ fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fcntl.fcntl(file.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+
+
+def make_file_blocking(file):
+ fcntl.fcntl(file.fileno(), fcntl.F_SETFL, fcntl.fcntl(file.fileno(), fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+
+def make_unblocking(sock):
+ if hasattr(sock, '_sock'):
+ sock._sock.setblocking(0)
+ elif hasattr(sock, 'setblocking'):
+ sock.setblocking(0)
+ else:
+ make_file_unblocking(sock)
+
+
+def _empty_writer(msg):
+ pass
+
+
+def shutdown_writing(sock, log=_empty_writer):
+    # FIXME: This does **not** work with SSLSocket! Apparently SSLSocket does not allow sending
+    # a close_notify TLS alert without completely shutting down the connection.
+    # Calling sock.shutdown(pysocket.SHUT_WR) simply turns off TLS encryption, and from that
+    # point on the raw encrypted data is returned when sock.recv() is called. :-(
+ if hasattr(sock, 'shutdown_write'):
+ sock.shutdown_write()
+ elif hasattr(sock, 'shutdown'):
+ try:
+ sock.shutdown(pysocket.SHUT_WR)
+ except TypeError as e:
+ # probably: "TypeError: shutdown() takes 1 positional argument but 2 were given"
+ log('Shutting down for writing not possible; trying shutdown instead: {0}'.format(e))
+ sock.shutdown()
+ elif not PY2 and isinstance(sock, getattr(pysocket, 'SocketIO')):
+ sock._sock.shutdown(pysocket.SHUT_WR)
+ else:
+ log('No idea how to signal end of writing')
+
+
+def write_to_socket(sock, data):
+ if hasattr(sock, '_send_until_done'):
+ # WrappedSocket (urllib3/contrib/pyopenssl) doesn't have `send`, but
+ # only `sendall`, which uses `_send_until_done` under the hood.
+ return sock._send_until_done(data)
+ elif hasattr(sock, 'send'):
+ return sock.send(data)
+ else:
+ return os.write(sock.fileno(), data)
diff --git a/ansible_collections/community/docker/plugins/module_utils/swarm.py b/ansible_collections/community/docker/plugins/module_utils/swarm.py
new file mode 100644
index 000000000..0dbc1e725
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/swarm.py
@@ -0,0 +1,281 @@
+# Copyright (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
+# Copyright (c) Thierry Bouvet (@tbouvet)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import json
+from time import sleep
+
+try:
+ from docker.errors import APIError, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils.common import AnsibleDockerClient
+
+
+class AnsibleDockerSwarmClient(AnsibleDockerClient):
+
+ def __init__(self, **kwargs):
+ super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
+
+ def get_swarm_node_id(self):
+ """
+        Get the 'NodeID' of the Swarm node, that is, of the Docker host the module is executed on.
+
+        :return:
+            NodeID of the host, or 'None' if the host is not part of a Swarm
+ """
+
+ try:
+ info = self.info()
+ except APIError as exc:
+            self.fail("Failed to get node information: %s" % to_native(exc))
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return swarm_info['Swarm']['NodeID']
+ return None
+
+ def check_if_swarm_node(self, node_id=None):
+ """
+        Check whether the host is part of a Docker Swarm. If 'node_id' is not provided, the Docker host
+        system information is inspected for the relevant Swarm keys. If 'node_id' is provided, the node
+        information is read, assuming the method runs on a Swarm manager; get_node_inspect() handles the
+        exception raised when it is not executed on a Swarm manager.
+
+        :param node_id: Node identifier
+        :return:
+            bool: True if the node is part of a Swarm, False otherwise
+ """
+
+ if node_id is None:
+ try:
+ info = self.info()
+ except APIError:
+ self.fail("Failed to get host information.")
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return True
+ if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
+ return True
+ return False
+ else:
+ try:
+ node_info = self.get_node_inspect(node_id=node_id)
+ except APIError:
+ return
+
+ if node_info['ID'] is not None:
+ return True
+ return False
+
+ def check_if_swarm_manager(self):
+ """
+        Check whether the node role is set as Manager in the Swarm. The node is the Docker host on which
+        the module action is performed. inspect_swarm() fails if the node is not a manager.
+
+ :return: True if node is Swarm Manager, False otherwise
+ """
+
+ try:
+ self.inspect_swarm()
+ return True
+ except APIError:
+ return False
+
+ def fail_task_if_not_swarm_manager(self):
+ """
+ If host is not a swarm manager then Ansible task on this host should end with 'failed' state
+ """
+ if not self.check_if_swarm_manager():
+ self.fail("Error running docker swarm module: must run on swarm manager node")
+
+ def check_if_swarm_worker(self):
+ """
+        Check whether the node role is set as Worker in the Swarm. The node is the Docker host on which
+        the module action is performed. Fails via check_if_swarm_node() when run on a host that is not part of a Swarm.
+
+ :return: True if node is Swarm Worker, False otherwise
+ """
+
+ if self.check_if_swarm_node() and not self.check_if_swarm_manager():
+ return True
+ return False
+
+ def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
+ """
+        Check whether the node status on the Swarm manager is 'down'. If node_id is provided, the manager
+        is queried about the specified node; otherwise it is queried about itself. When run on a Swarm
+        worker node, or on a host that is not part of a Swarm, the playbook fails.
+
+        :param repeat_check: number of check attempts, with a 5 second delay between them; by default check only once
+        :param node_id: node ID or name; if None, the method tries to use the node_id of the host the module runs on
+        :return:
+            True if the node is part of the Swarm but its state is down, False otherwise
+ """
+
+ if repeat_check < 1:
+ repeat_check = 1
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ for retry in range(0, repeat_check):
+ if retry > 0:
+ sleep(5)
+ node_info = self.get_node_inspect(node_id=node_id)
+ if node_info['Status']['State'] == 'down':
+ return True
+ return False
+
+ def get_node_inspect(self, node_id=None, skip_missing=False):
+ """
+        Return Swarm node information for a single node, as shown by the 'docker node inspect' command.
+
+        :param skip_missing: if True, return None instead of failing the task when the node is missing
+        :param node_id: node ID or name; if None, the method tries to use the node_id of the host the module runs on
+ :return:
+ Single node information structure
+ """
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ if node_id is None:
+ self.fail("Failed to get node information.")
+
+ try:
+ node_info = self.inspect_node(node_id=node_id)
+ except APIError as exc:
+ if exc.status_code == 503:
+                self.fail("Cannot inspect node: to inspect a node, execute the module on a Swarm manager")
+ if exc.status_code == 404:
+ if skip_missing:
+ return None
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+
+ if 'ManagerStatus' in node_info:
+ if node_info['ManagerStatus'].get('Leader'):
+                # This is a workaround for a Docker bug where in some cases the leader IP is 0.0.0.0.
+                # See moby/moby#35437 for details.
+ count_colons = node_info['ManagerStatus']['Addr'].count(":")
+ if count_colons == 1:
+ swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
+ else:
+ swarm_leader_ip = node_info['Status']['Addr']
+ node_info['Status']['Addr'] = swarm_leader_ip
+ return node_info
+
+ def get_all_nodes_inspect(self):
+ """
+        Return Swarm node information about all registered nodes, as shown by the 'docker node inspect' command.
+
+ :return:
+ Structure with information about all nodes
+ """
+ try:
+ node_info = self.nodes()
+ except APIError as exc:
+ if exc.status_code == 503:
+                self.fail("Cannot inspect node: to inspect a node, execute the module on a Swarm manager")
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+ return node_info
+
+ def get_all_nodes_list(self, output='short'):
+ """
+        Return the list of nodes registered in the Swarm.
+
+        :param output: Defines the format of the returned data
+        :return:
+            If 'output' is 'short', the returned data is a list of the hostnames of the nodes registered
+            in the Swarm; if 'output' is 'long', it is a list of dicts containing the attributes as shown
+            in the output of the 'docker node ls' command
+ """
+ nodes_list = []
+
+ nodes_inspect = self.get_all_nodes_inspect()
+ if nodes_inspect is None:
+ return None
+
+ if output == 'short':
+ for node in nodes_inspect:
+ nodes_list.append(node['Description']['Hostname'])
+ elif output == 'long':
+ for node in nodes_inspect:
+ node_property = {}
+
+ node_property.update({'ID': node['ID']})
+ node_property.update({'Hostname': node['Description']['Hostname']})
+ node_property.update({'Status': node['Status']['State']})
+ node_property.update({'Availability': node['Spec']['Availability']})
+ if 'ManagerStatus' in node:
+ if node['ManagerStatus']['Leader'] is True:
+ node_property.update({'Leader': True})
+ node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
+ node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
+
+ nodes_list.append(node_property)
+ else:
+ return None
+
+ return nodes_list
+
+ def get_node_name_by_id(self, nodeid):
+ return self.get_node_inspect(nodeid)['Description']['Hostname']
+
+ def get_unlock_key(self):
+ if self.docker_py_version < LooseVersion('2.7.0'):
+ return None
+ return super(AnsibleDockerSwarmClient, self).get_unlock_key()
+
+ def get_service_inspect(self, service_id, skip_missing=False):
+ """
+        Return Swarm service information for a single service, as shown by the 'docker service inspect' command.
+
+        :param service_id: service ID or name
+        :param skip_missing: if True, return None instead of failing the task when the service is missing
+ :return:
+ Single service information structure
+ """
+ try:
+ service_info = self.inspect_service(service_id)
+ except NotFound as exc:
+ if skip_missing is False:
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ else:
+ return None
+ except APIError as exc:
+ if exc.status_code == 503:
+                self.fail("Cannot inspect service: to inspect a service, execute the module on a Swarm manager")
+ self.fail("Error inspecting swarm service: %s" % exc)
+ except Exception as exc:
+ self.fail("Error inspecting swarm service: %s" % exc)
+
+ json_str = json.dumps(service_info, ensure_ascii=False)
+ service_info = json.loads(json_str)
+ return service_info
diff --git a/ansible_collections/community/docker/plugins/module_utils/util.py b/ansible_collections/community/docker/plugins/module_utils/util.py
new file mode 100644
index 000000000..9c6b738c6
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/util.py
@@ -0,0 +1,394 @@
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import re
+from datetime import timedelta
+
+from ansible.module_utils.basic import env_fallback
+from ansible.module_utils.common.collections import is_sequence
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+
+
+DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
+DEFAULT_TLS = False
+DEFAULT_TLS_VERIFY = False
+DEFAULT_TLS_HOSTNAME = 'localhost' # deprecated
+DEFAULT_TIMEOUT_SECONDS = 60
+
+DOCKER_COMMON_ARGS = dict(
+ docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
+ tls_hostname=dict(type='str', fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
+ api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
+ ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
+ client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
+ client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
+ ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
+ tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
+ use_ssh_client=dict(type='bool', default=False),
+ validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
+ debug=dict(type='bool', default=False)
+)
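+
+# Illustrative task snippet showing how these common options surface as module
+# parameters (module choice and values are examples, not requirements):
+#
+#   - community.docker.docker_host_info:
+#       docker_host: tcp://127.0.0.1:2376
+#       tls: true
+#       validate_certs: true
+#       timeout: 120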
+
+DOCKER_COMMON_ARGS_VARS = dict([
+ [option_name, 'ansible_docker_%s' % option_name]
+ for option_name in DOCKER_COMMON_ARGS
+ if option_name != 'debug'
+])
+
+DOCKER_MUTUALLY_EXCLUSIVE = []
+
+DOCKER_REQUIRED_TOGETHER = [
+ ['client_cert', 'client_key']
+]
+
+DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
+BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+
+
+def is_image_name_id(name):
+ """Check whether the given image name is in fact an image ID (hash)."""
+ if re.match('^sha256:[0-9a-fA-F]{64}$', name):
+ return True
+ return False
+
+
+def is_valid_tag(tag, allow_empty=False):
+ """Check whether the given string is a valid docker tag name."""
+ if not tag:
+ return allow_empty
+ # See here ("Extended description") for a definition what tags can be:
+ # https://docs.docker.com/engine/reference/commandline/tag/
+ return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
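+
+
+# Illustrative behavior of the two helpers above:
+#   is_image_name_id('sha256:' + '0' * 64)  -> True
+#   is_image_name_id('ubuntu:22.04')        -> False
+#   is_valid_tag('22.04')                   -> True
+#   is_valid_tag('', allow_empty=True)      -> True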
+
+
+def sanitize_result(data):
+ """Sanitize data object for return to Ansible.
+
+ When the data object contains types such as docker.types.containers.HostConfig,
+ Ansible will fail when these are returned via exit_json or fail_json.
+ HostConfig is derived from dict, but its constructor requires additional
+ arguments. This function sanitizes data structures by recursively converting
+ everything derived from dict to dict and everything derived from list (and tuple)
+ to a list.
+ """
+ if isinstance(data, dict):
+ return dict((k, sanitize_result(v)) for k, v in data.items())
+ elif isinstance(data, (list, tuple)):
+ return [sanitize_result(v) for v in data]
+ else:
+ return data
+
+
+class DockerBaseClass(object):
+ def __init__(self):
+ self.debug = False
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+
+def update_tls_hostname(result, old_behavior=False, deprecate_function=None, uses_tls=True):
+ if result['tls_hostname'] is None:
+ # get default machine name from the url
+ parsed_url = urlparse(result['docker_host'])
+ result['tls_hostname'] = parsed_url.netloc.rsplit(':', 1)[0]
+
+
+def compare_dict_allow_more_present(av, bv):
+ '''
+ Compare two dictionaries for whether every entry of the first is in the second.
+ '''
+ for key, value in av.items():
+ if key not in bv:
+ return False
+ if bv[key] != value:
+ return False
+ return True
+
+
+def compare_generic(a, b, method, datatype):
+ '''
+ Compare values a and b as described by method and datatype.
+
+ Returns ``True`` if the values compare equal, and ``False`` if not.
+
+ ``a`` is usually the module's parameter, while ``b`` is a property
+ of the current object. ``a`` must not be ``None`` (except for
+ ``datatype == 'value'``).
+
+ Valid values for ``method`` are:
+ - ``ignore`` (always compare as equal);
+    - ``strict`` (values must be exactly equal);
+ - ``allow_more_present`` (allow b to have elements which a does not have).
+
+ Valid values for ``datatype`` are:
+ - ``value``: for simple values (strings, numbers, ...);
+ - ``list``: for ``list``s or ``tuple``s where order matters;
+ - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
+ matter;
+ - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
+ not matter and which contain ``dict``s; ``allow_more_present`` is used
+ for the ``dict``s, and these are assumed to be dictionaries of values;
+ - ``dict``: for dictionaries of values.
+ '''
+ if method == 'ignore':
+ return True
+ # If a or b is None:
+ if a is None or b is None:
+ # If both are None: equality
+ if a == b:
+ return True
+ # Otherwise, not equal for values, and equal
+ # if the other is empty for set/list/dict
+ if datatype == 'value':
+ return False
+ # For allow_more_present, allow a to be None
+ if method == 'allow_more_present' and a is None:
+ return True
+ # Otherwise, the iterable object which is not None must have length 0
+ return len(b if a is None else a) == 0
+ # Do proper comparison (both objects not None)
+ if datatype == 'value':
+ return a == b
+ elif datatype == 'list':
+ if method == 'strict':
+ return a == b
+ else:
+ i = 0
+ for v in a:
+ while i < len(b) and b[i] != v:
+ i += 1
+ if i == len(b):
+ return False
+ i += 1
+ return True
+ elif datatype == 'dict':
+ if method == 'strict':
+ return a == b
+ else:
+ return compare_dict_allow_more_present(a, b)
+ elif datatype == 'set':
+ set_a = set(a)
+ set_b = set(b)
+ if method == 'strict':
+ return set_a == set_b
+ else:
+ return set_b >= set_a
+ elif datatype == 'set(dict)':
+ for av in a:
+ found = False
+ for bv in b:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ if method == 'strict':
+ # If we would know that both a and b do not contain duplicates,
+ # we could simply compare len(a) to len(b) to finish this test.
+ # We can assume that b has no duplicates (as it is returned by
+ # docker), but we don't know for a.
+ for bv in b:
+ found = False
+ for av in a:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ return True
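+
+
+# A few illustrative comparisons, based on the semantics documented above:
+#   compare_generic('a', 'a', 'strict', 'value')                            -> True
+#   compare_generic(['a'], ['a', 'b'], 'allow_more_present', 'set')         -> True
+#   compare_generic(['a', 'b'], ['a'], 'allow_more_present', 'set')         -> False
+#   compare_generic([{'x': 1}], [{'x': 1, 'y': 2}], 'strict', 'set(dict)')  -> True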
+
+
+class DifferenceTracker(object):
+ def __init__(self):
+ self._diff = []
+
+ def add(self, name, parameter=None, active=None):
+ self._diff.append(dict(
+ name=name,
+ parameter=parameter,
+ active=active,
+ ))
+
+ def merge(self, other_tracker):
+ self._diff.extend(other_tracker._diff)
+
+ @property
+ def empty(self):
+ return len(self._diff) == 0
+
+ def get_before_after(self):
+ '''
+ Return texts ``before`` and ``after``.
+ '''
+ before = dict()
+ after = dict()
+ for item in self._diff:
+ before[item['name']] = item['active']
+ after[item['name']] = item['parameter']
+ return before, after
+
+ def has_difference_for(self, name):
+ '''
+ Returns a boolean if a difference exists for name
+ '''
+ return any(diff for diff in self._diff if diff['name'] == name)
+
+ def get_legacy_docker_container_diffs(self):
+ '''
+ Return differences in the docker_container legacy format.
+ '''
+ result = []
+ for entry in self._diff:
+ item = dict()
+ item[entry['name']] = dict(
+ parameter=entry['parameter'],
+ container=entry['active'],
+ )
+ result.append(item)
+ return result
+
+ def get_legacy_docker_diffs(self):
+ '''
+ Return differences in the docker_container legacy format.
+ '''
+ result = [entry['name'] for entry in self._diff]
+ return result
+
+
+def clean_dict_booleans_for_docker_api(data, allow_sequences=False):
+ '''
+    The Docker daemon's Go-based API does not accept the Python booleans 'True' or 'False'
+    in string maps, while Ansible is just fine with them in YAML. As such, they need to be
+    converted to the strings 'true' and 'false' in cases where we pass dictionaries to the
+    Docker API (e.g. docker_network's driver_options and docker_prune's filters).
+    When allow_sequences=True, YAML sequences (lists, tuples) are converted to [str] instead
+    of str([...]), which is the expected format of filters that accept lists, such as labels.
+ '''
+ def sanitize(value):
+ if value is True:
+ return 'true'
+ elif value is False:
+ return 'false'
+ else:
+ return str(value)
+
+ result = dict()
+ if data is not None:
+ for k, v in data.items():
+ result[str(k)] = [sanitize(e) for e in v] if allow_sequences and is_sequence(v) else sanitize(v)
+ return result
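+
+
+# Illustrative conversions:
+#   clean_dict_booleans_for_docker_api({'dangling': True})
+#     -> {'dangling': 'true'}
+#   clean_dict_booleans_for_docker_api({'label': ['foo', True]}, allow_sequences=True)
+#     -> {'label': ['foo', 'true']}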
+
+
+def convert_duration_to_nanosecond(time_str):
+ """
+ Return time duration in nanosecond.
+ """
+ if not isinstance(time_str, str):
+ raise ValueError('Missing unit in duration - %s' % time_str)
+
+ regex = re.compile(
+ r'^(((?P<hours>\d+)h)?'
+ r'((?P<minutes>\d+)m(?!s))?'
+ r'((?P<seconds>\d+)s)?'
+ r'((?P<milliseconds>\d+)ms)?'
+ r'((?P<microseconds>\d+)us)?)$'
+ )
+ parts = regex.match(time_str)
+
+ if not parts:
+ raise ValueError('Invalid time duration - %s' % time_str)
+
+ parts = parts.groupdict()
+ time_params = {}
+ for (name, value) in parts.items():
+ if value:
+ time_params[name] = int(value)
+
+ delta = timedelta(**time_params)
+ time_in_nanoseconds = (
+ delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
+ ) * 10 ** 3
+
+ return time_in_nanoseconds
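+
+
+# Illustrative conversions:
+#   convert_duration_to_nanosecond('1h30m')  -> 5400000000000  (90 minutes)
+#   convert_duration_to_nanosecond('500ms')  -> 500000000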
+
+
+def normalize_healthcheck_test(test):
+ if isinstance(test, (tuple, list)):
+ return [str(e) for e in test]
+ return ['CMD-SHELL', str(test)]
+
+
+def normalize_healthcheck(healthcheck, normalize_test=False):
+ """
+ Return dictionary of healthcheck parameters.
+ """
+ result = dict()
+
+ # All supported healthcheck parameters
+ options = ('test', 'interval', 'timeout', 'start_period', 'retries')
+
+ duration_options = ('interval', 'timeout', 'start_period')
+
+ for key in options:
+ if key in healthcheck:
+ value = healthcheck[key]
+ if value is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if key in duration_options:
+ value = convert_duration_to_nanosecond(value)
+ if not value:
+ continue
+ if key == 'retries':
+ try:
+ value = int(value)
+ except ValueError:
+ raise ValueError(
+ 'Cannot parse number of retries for healthcheck. '
+ 'Expected an integer, got "{0}".'.format(value)
+ )
+ if key == 'test' and normalize_test:
+ value = normalize_healthcheck_test(value)
+ result[key] = value
+
+ return result
+
+
+def parse_healthcheck(healthcheck):
+ """
+ Return dictionary of healthcheck parameters and boolean if
+ healthcheck defined in image was requested to be disabled.
+ """
+ if (not healthcheck) or (not healthcheck.get('test')):
+ return None, None
+
+ result = normalize_healthcheck(healthcheck, normalize_test=True)
+
+ if result['test'] == ['NONE']:
+ # If the user explicitly disables the healthcheck, return None
+ # as the healthcheck object, and set disable_healthcheck to True
+ return None, True
+
+ return result, False
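+
+
+# Illustrative results:
+#   parse_healthcheck({'test': 'curl -f http://localhost/', 'interval': '30s'})
+#     -> ({'test': ['CMD-SHELL', 'curl -f http://localhost/'], 'interval': 30000000000}, False)
+#   parse_healthcheck({'test': ['NONE']})
+#     -> (None, True)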
+
+
+def omit_none_from_dict(d):
+ """
+ Return a copy of the dictionary with all keys with value None omitted.
+ """
+ return dict((k, v) for (k, v) in d.items() if v is not None)
diff --git a/ansible_collections/community/docker/plugins/module_utils/version.py b/ansible_collections/community/docker/plugins/module_utils/version.py
new file mode 100644
index 000000000..5184d70eb
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/module_utils/version.py
@@ -0,0 +1,24 @@
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+"""Provide version object to compare version numbers."""
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+# Once we drop support for ansible-core 2.11, we can remove the try/except.
+
+from ansible.module_utils.six import raise_from
+
+try:
+ from ansible.module_utils.compat.version import LooseVersion, StrictVersion # noqa: F401, pylint: disable=unused-import
+except ImportError:
+ try:
+ from distutils.version import LooseVersion, StrictVersion # noqa: F401, pylint: disable=unused-import
+ except ImportError as exc:
+ msg = 'To use this plugin or module with ansible-core 2.11, you need to use Python < 3.12 with distutils.version present'
+ raise_from(ImportError(msg), exc)
diff --git a/ansible_collections/community/docker/plugins/modules/current_container_facts.py b/ansible_collections/community/docker/plugins/modules/current_container_facts.py
new file mode 100644
index 000000000..f2cde2b59
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/current_container_facts.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2020 Matt Clay <mclay@redhat.com>
+# Copyright (c) 2020 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = r'''
+---
+module: current_container_facts
+short_description: Return facts about whether the module runs in a container
+version_added: 1.1.0
+description:
+ - Return facts about whether the module runs in a Docker or podman container.
+ - This module attempts a best-effort detection. There might be special cases where
+ it does not work; if you encounter one, L(please file an issue,
+ https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&template=bug_report.md).
+author:
+ - Felix Fontein (@felixfontein)
+extends_documentation_fragment:
+ - community.docker.attributes
+ - community.docker.attributes.facts
+ - community.docker.attributes.facts_module
+'''
+
+EXAMPLES = '''
+- name: Get facts on current container
+ community.docker.current_container_facts:
+
+- name: Print information on current container when running in a container
+ ansible.builtin.debug:
+ msg: "Container ID is {{ ansible_module_container_id }}"
+ when: ansible_module_running_in_container
+'''
+
+RETURN = r'''
+ansible_facts:
+ description: Ansible facts returned by the module
+ type: dict
+ returned: always
+ contains:
+ ansible_module_running_in_container:
+ description:
+ - Whether the module was able to detect that it runs in a container or not.
+ returned: always
+ type: bool
+ ansible_module_container_id:
+ description:
+ - The detected container ID.
+ - Contains an empty string if no container was detected.
+ returned: always
+ type: str
+ ansible_module_container_type:
+ description:
+ - The detected container environment.
+ - Contains an empty string if no container was detected, or a non-empty string identifying the container environment.
+ - C(docker) indicates that the module ran inside a regular Docker container.
+ - C(azure_pipelines) indicates that the module ran on Azure Pipelines. This seems to no longer be reported.
+ - C(github_actions) indicates that the module ran inside a Docker container on GitHub Actions. It is supported since community.docker 2.4.0.
+ - C(podman) indicates that the module ran inside a regular Podman container. It is supported since community.docker 3.3.0.
+ returned: always
+ type: str
+ choices:
+ - ''
+ - docker
+ - azure_pipelines
+ - github_actions
+ - podman
+'''
+
+import os
+import re
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(dict(), supports_check_mode=True)
+
+ cpuset_path = '/proc/self/cpuset'
+ mountinfo_path = '/proc/self/mountinfo'
+
+ container_id = ''
+ container_type = ''
+
+ contents = None
+ if os.path.exists(cpuset_path):
+ # File content varies based on the environment:
+ # No Container: /
+ # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507
+ # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891
+ # Podman: /../../../../../..
+ # While this was true and worked well for a long time, this seems to be no longer accurate
+ # with newer Docker / Podman versions and/or with cgroupv2. That's why the /proc/self/mountinfo
+ # detection further down is done when this test is inconclusive.
+ with open(cpuset_path, 'rb') as f:
+ contents = f.read().decode('utf-8')
+
+ cgroup_path, cgroup_name = os.path.split(contents.strip())
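+        # For example, os.path.split('/docker/<id>') yields ('/docker', '<id>').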
+
+ if cgroup_path == '/docker':
+ container_id = cgroup_name
+ container_type = 'docker'
+
+ if cgroup_path == '/azpl_job':
+ container_id = cgroup_name
+ container_type = 'azure_pipelines'
+
+ if cgroup_path == '/actions_job':
+ container_id = cgroup_name
+ container_type = 'github_actions'
+
+ if not container_id and os.path.exists(mountinfo_path):
+ with open(mountinfo_path, 'rb') as f:
+ contents = f.read().decode('utf-8')
+
+ # As to why this works, see the explanations by Matt Clay in
+ # https://github.com/ansible/ansible/blob/80d2f8da02052f64396da6b8caaf820eedbf18e2/test/lib/ansible_test/_internal/docker_util.py#L571-L610
+
+ for line in contents.splitlines():
+ parts = line.split()
+ if len(parts) >= 5 and parts[4] == '/etc/hostname':
+ m = re.match('.*/([a-f0-9]{64})/hostname$', parts[3])
+ if m:
+ container_id = m.group(1)
+ container_type = 'docker'
+
+ m = re.match('.*/([a-f0-9]{64})/userdata/hostname$', parts[3])
+ if m:
+ container_id = m.group(1)
+ container_type = 'podman'
+
+ module.exit_json(ansible_facts=dict(
+ ansible_module_running_in_container=container_id != '',
+ ansible_module_container_id=container_id,
+ ansible_module_container_type=container_type,
+ ))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_compose.py b/ansible_collections/community/docker/plugins/modules/docker_compose.py
new file mode 100644
index 000000000..01db6a22f
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_compose.py
@@ -0,0 +1,1236 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: docker_compose
+
+short_description: Manage multi-container Docker applications with Docker Compose.
+
+author: "Chris Houseknecht (@chouseknecht)"
+
+description:
+  - Uses Docker Compose to start, shut down, and scale services. B(This module requires docker-compose < 2.0.0.)
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option.
+ - See the examples for more details.
+ - Supports check mode.
+ - This module was called C(docker_service) before Ansible 2.8. The usage did not change.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ project_src:
+ description:
+ - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file.
+ - Mutually exclusive with I(definition).
+ - Required when no I(definition) is provided.
+ type: path
+ project_name:
+ description:
+ - Provide a project name. If not provided, the project name is taken from the basename of I(project_src).
+ - Required when I(definition) is provided.
+ type: str
+ env_file:
+ description:
+ - By default environment files are loaded from a C(.env) file located directly under the I(project_src) directory.
+ - I(env_file) can be used to specify the path of a custom environment file instead.
+ - The path is relative to the I(project_src) directory.
+ - Requires C(docker-compose) version 1.25.0 or greater.
+ - "Note: C(docker-compose) versions C(<=1.28) load the env file from the current working directory of the
+ C(docker-compose) command rather than I(project_src)."
+ type: path
+ version_added: 1.9.0
+ files:
+ description:
+ - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml).
+ - Files are loaded and merged in the order given.
+ type: list
+ elements: path
+ profiles:
+ description:
+ - List of profiles to enable when starting services.
+ - Equivalent to C(docker-compose --profile).
+ - Requires C(docker-compose) version 1.28.0 or greater.
+ type: list
+ elements: str
+ version_added: 1.8.0
+ state:
+ description:
+ - Desired state of the project.
+      - Specifying C(present) is the same as running C(docker-compose up) (or C(docker-compose stop) when I(stopped=true),
+        or C(docker-compose restart) when I(restarted=true)).
+ - Specifying C(absent) is the same as running C(docker-compose down).
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ services:
+ description:
+      - When I(state) is C(present), run C(docker-compose up) (or C(docker-compose stop) when I(stopped=true), or
+        C(docker-compose restart) when I(restarted=true)) on a subset of services.
+ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)).
+ type: list
+ elements: str
+ scale:
+ description:
+ - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key
+ is the name of the service and the value is an integer count for the number of containers.
+ type: dict
+ dependencies:
+ description:
+ - When I(state) is C(present) specify whether or not to include linked services.
+ type: bool
+ default: true
+ definition:
+ description:
+ - Compose file describing one or more services, networks and volumes.
+ - Mutually exclusive with I(project_src) and I(files).
+ type: dict
+ hostname_check:
+ description:
+ - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
+ type: bool
+ default: false
+ recreate:
+ description:
+ - By default containers will be recreated when their configuration differs from the service definition.
+ - Setting to C(never) ignores configuration differences and leaves existing containers unchanged.
+ - Setting to C(always) forces recreation of all existing containers.
+ type: str
+ default: smart
+ choices:
+ - always
+ - never
+ - smart
+ build:
+ description:
+ - Use with I(state) C(present) to always build images prior to starting the application.
+ - Same as running C(docker-compose build) with the pull option.
+ - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
+ - Use the I(nocache) option to ignore the image cache when performing the build.
+ - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: false
+ pull:
+ description:
+ - Use with I(state) C(present) to always pull images prior to starting the application.
+ - Same as running C(docker-compose pull).
+ - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: false
+ nocache:
+ description:
+ - Use with the I(build) option to ignore the cache during the image build process.
+ type: bool
+ default: false
+ remove_images:
+ description:
+ - Use with I(state) C(absent) to remove all images or only local images.
+ type: str
+ choices:
+ - 'all'
+ - 'local'
+ remove_volumes:
+ description:
+ - Use with I(state) C(absent) to remove data volumes.
+ type: bool
+ default: false
+ stopped:
+ description:
+ - Use with I(state) C(present) to stop all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be stopped.
+      - Requires C(docker-compose) version 1.17.0 or greater for full support. With older versions, services that are
+        supposed to be created in the stopped state will first be started and then stopped.
+ type: bool
+ default: false
+ restarted:
+ description:
+ - Use with I(state) C(present) to restart all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be restarted.
+ type: bool
+ default: false
+ remove_orphans:
+ description:
+ - Remove containers for services not defined in the Compose file.
+ type: bool
+ default: false
+ timeout:
+ description:
+ - Timeout in seconds for container shutdown when attached or when containers are already running.
+      - By default C(compose) will use a C(10s) timeout unless C(stop_grace_period) is defined for a
+        particular service in the I(project_src).
+ type: int
+ default: null
+ use_ssh_client:
+ description:
+      - Currently ignored for this module; it may become supported in a future release.
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0"
+ - "docker-compose >= 1.7.0, < 2.0.0"
+ - "Docker API >= 1.25"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
+# flask directory used in the examples below.
+
+- name: Run using a project directory
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Tear down existing services
+ community.docker.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Create and start services
+ community.docker.docker_compose:
+ project_src: flask
+ register: output
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: output
+
+ - name: Run `docker-compose up` again
+ community.docker.docker_compose:
+ project_src: flask
+ build: false
+ register: output
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that: not output.changed
+
+ - name: Stop all services
+ community.docker.docker_compose:
+ project_src: flask
+ build: false
+ stopped: true
+ register: output
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: output
+
+    - name: Verify that web and db services are not running
+ ansible.builtin.assert:
+ that:
+ - "not output.services.web.flask_web_1.state.running"
+ - "not output.services.db.flask_db_1.state.running"
+
+ - name: Restart services
+ community.docker.docker_compose:
+ project_src: flask
+ build: false
+ restarted: true
+ register: output
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: output
+
+ - name: Verify that web and db services are running
+ ansible.builtin.assert:
+ that:
+ - "output.services.web.flask_web_1.state.running"
+ - "output.services.db.flask_db_1.state.running"
+
+- name: Scale the web service to 2
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Scale the web service to two instances
+ community.docker.docker_compose:
+ project_src: flask
+ scale:
+ web: 2
+ register: output
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: output
+
+- name: Run with inline Compose file version 2
+ # https://docs.docker.com/compose/compose-file/compose-file-v2/
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Remove flask project
+ community.docker.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Start flask project with inline definition
+ community.docker.docker_compose:
+ project_name: flask
+ definition:
+ version: '2'
+ services:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+ register: output
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: output
+
+ - name: Verify that the db and web services are running
+ ansible.builtin.assert:
+ that:
+ - "output.services.web.flask_web_1.state.running"
+ - "output.services.db.flask_db_1.state.running"
+
+- name: Run with inline Compose file version 1
+ # https://docs.docker.com/compose/compose-file/compose-file-v1/
+ hosts: localhost
+ gather_facts: false
+ tasks:
+ - name: Remove flask project
+ community.docker.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Start flask project with inline definition
+ community.docker.docker_compose:
+ project_name: flask
+ definition:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ links:
+ - db
+ register: output
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: output
+
+ - name: Verify that web and db services are running
+ ansible.builtin.assert:
+ that:
+ - "output.services.web.flask_web_1.state.running"
+ - "output.services.db.flask_db_1.state.running"
+'''
+
+RETURN = '''
+services:
+ description:
+ - A dictionary mapping the service's name to a dictionary of containers.
+ returned: success
+ type: complex
+ contains:
+ container_name:
+ description: Name of the container. Format is C(project_service_#).
+ returned: success
+ type: complex
+ contains:
+ cmd:
+ description: One or more commands to be executed in the container.
+ returned: success
+ type: list
+ elements: str
+ example: ["postgres"]
+ image:
+ description: Name of the image from which the container was built.
+ returned: success
+ type: str
+ example: postgres
+ labels:
+          description: Metadata assigned to the container.
+ returned: success
+ type: dict
+ example: {...}
+ networks:
+        description: Dictionary of the networks to which the container is attached, keyed by network name.
+        returned: success
+        type: dict
+ contains:
+ IPAddress:
+ description: The IP address assigned to the container.
+ returned: success
+ type: str
+ example: 172.17.0.2
+ IPPrefixLen:
+ description: Number of bits used by the subnet.
+ returned: success
+ type: int
+ example: 16
+ aliases:
+ description: Aliases assigned to the container by the network.
+ returned: success
+ type: list
+ elements: str
+ example: ['db']
+ globalIPv6:
+ description: IPv6 address assigned to the container.
+ returned: success
+ type: str
+ example: ''
+ globalIPv6PrefixLen:
+ description: IPv6 subnet length.
+ returned: success
+ type: int
+ example: 0
+ links:
+ description: List of container names to which this container is linked.
+ returned: success
+ type: list
+ elements: str
+ example: null
+ macAddress:
+                description: MAC address assigned to the virtual NIC.
+ returned: success
+ type: str
+ example: "02:42:ac:11:00:02"
+ state:
+ description: Information regarding the current disposition of the container.
+ returned: success
+ type: dict
+ contains:
+ running:
+ description: Whether or not the container is up with a running process.
+ returned: success
+ type: bool
+ example: true
+ status:
+ description: Description of the running state.
+ returned: success
+ type: str
+ example: running
+
+actions:
+ description: Provides the actions to be taken on each service as determined by compose.
+ returned: when in check mode or I(debug) is C(true)
+ type: complex
+ contains:
+ service_name:
+ description: Name of the service.
+ returned: always
+ type: complex
+ contains:
+ pulled_image:
+ description: Provides image details when a new image is pulled for the service.
+ returned: on image pull
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+ built_image:
+ description: Provides image details when a new image is built for the service.
+ returned: on image build
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+
+ action:
+ description: A descriptive name of the action to be performed on the service's containers.
+ returned: always
+ type: list
+        elements: dict
+ contains:
+ id:
+ description: the container's long ID
+ returned: always
+ type: str
+ name:
+ description: the container's name
+ returned: always
+ type: str
+ short_id:
+ description: the container's short ID
+ returned: always
+ type: str
+'''
+
+import os
+import re
+import sys
+import tempfile
+import traceback
+from contextlib import contextmanager
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+try:
+ import yaml
+ HAS_YAML = True
+ HAS_YAML_EXC = None
+except ImportError as dummy:
+ HAS_YAML = False
+ HAS_YAML_EXC = traceback.format_exc()
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+try:
+ from compose import __version__ as compose_version
+ from compose.cli.command import project_from_options
+ from compose.service import NoSuchImageError
+ from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
+ HAS_COMPOSE = True
+ HAS_COMPOSE_EXC = None
+ MINIMUM_COMPOSE_VERSION = '1.7.0'
+except ImportError as dummy:
+ HAS_COMPOSE = False
+ HAS_COMPOSE_EXC = traceback.format_exc()
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+)
+
+
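+# Maps this module's TLS/authentication options to the docker-compose CLI
+# flags with the same meaning; _get_auth_options() below uses this mapping to
+# build the CLI-style options dict consumed by project_from_options().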
+AUTH_PARAM_MAPPING = {
+ u'docker_host': u'--host',
+ u'tls': u'--tls',
+ u'cacert_path': u'--tlscacert',
+ u'cert_path': u'--tlscert',
+ u'key_path': u'--tlskey',
+ u'tls_verify': u'--tlsverify'
+}
+
+
+@contextmanager
+def stdout_redirector(path_name):
+ old_stdout = sys.stdout
+ fd = open(path_name, 'w')
+ sys.stdout = fd
+ try:
+ yield
+    finally:
+        sys.stdout = old_stdout
+        # close the file so buffered output is flushed before it is read back
+        fd.close()
+
+
+@contextmanager
+def stderr_redirector(path_name):
+ old_fh = sys.stderr
+ fd = open(path_name, 'w')
+ sys.stderr = fd
+ try:
+ yield
+    finally:
+        sys.stderr = old_fh
+        # close the file so buffered output is flushed before it is read back
+        fd.close()
+
+
+def make_redirection_tempfiles():
+ dummy, out_redir_name = tempfile.mkstemp(prefix="ansible")
+ dummy, err_redir_name = tempfile.mkstemp(prefix="ansible")
+ return (out_redir_name, err_redir_name)
+
+
+def cleanup_redirection_tempfiles(out_name, err_name):
+ for i in [out_name, err_name]:
+ os.remove(i)
+
+
+def get_redirected_output(path_name):
+ output = []
+ with open(path_name, 'r') as fd:
+ for line in fd:
+            # strip ANSI SGR (format/color) escape sequences; the character
+            # class avoids greedily eating text between two escape sequences
+            new_line = re.sub(r'\x1b\[[0-9;]*m', '', line)
+ output.append(new_line)
+ os.remove(path_name)
+ return output
+
+
+def attempt_extract_errors(exc_str, stdout, stderr):
+ errors = [l.strip() for l in stderr if l.strip().startswith('ERROR:')]
+ errors.extend([l.strip() for l in stdout if l.strip().startswith('ERROR:')])
+
+ warnings = [l.strip() for l in stderr if l.strip().startswith('WARNING:')]
+ warnings.extend([l.strip() for l in stdout if l.strip().startswith('WARNING:')])
+
+    # assume either the exception body (if present) or the last error was the
+    # most fatal
+
+    if exc_str.strip():
+        msg = exc_str.strip()
+    elif errors:
+        # keep msg as a native string; calling .encode('utf-8') here would
+        # leak a bytes repr into the failure message on Python 3
+        msg = to_native(errors[-1])
+    else:
+        msg = 'unknown cause'
+
+ return {
+ 'warnings': [to_native(w) for w in warnings],
+ 'errors': [to_native(e) for e in errors],
+ 'msg': msg,
+ 'module_stderr': ''.join(stderr),
+ 'module_stdout': ''.join(stdout)
+ }
+
+
+def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
+ if err_name is None:
+ stderr = []
+ else:
+ stderr = get_redirected_output(err_name)
+ stdout = get_redirected_output(out_name)
+
+ reason = attempt_extract_errors(str(exc), stdout, stderr)
+ reason['msg'] = msg_format % reason['msg']
+ return reason
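+
+
+# Illustrative sketch of how the redirection helpers above fit together
+# (this mirrors the pattern used by ContainerManager below; 'project' and
+# 'client' are placeholders):
+#
+#   out_name, err_name = make_redirection_tempfiles()
+#   try:
+#       with stdout_redirector(out_name):
+#           with stderr_redirector(err_name):
+#               project.up(...)  # any noisy docker-compose call
+#   except Exception as exc:
+#       fail_reason = get_failure_info(exc, out_name, err_name, msg_format='Error: %s')
+#       client.fail(**fail_reason)
+#   else:
+#       cleanup_redirection_tempfiles(out_name, err_name)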
+
+
+class ContainerManager(DockerBaseClass):
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ self.client = client
+ self.project_src = None
+ self.files = None
+ self.project_name = None
+ self.state = None
+ self.definition = None
+ self.hostname_check = None
+ self.timeout = None
+ self.remove_images = None
+ self.remove_orphans = None
+ self.remove_volumes = None
+ self.stopped = None
+ self.restarted = None
+ self.recreate = None
+ self.build = None
+ self.dependencies = None
+ self.services = None
+ self.scale = None
+ self.debug = None
+ self.pull = None
+ self.nocache = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+ self.check_mode = client.check_mode
+
+ if not self.debug:
+ self.debug = client.module._debug
+
+ self.options = dict()
+ self.options.update(self._get_auth_options())
+ self.options[u'--skip-hostname-check'] = (not self.hostname_check)
+
+ if self.project_name:
+ self.options[u'--project-name'] = self.project_name
+
+ if self.env_file:
+ self.options[u'--env-file'] = self.env_file
+
+ if self.files:
+ self.options[u'--file'] = self.files
+
+ if self.profiles:
+ self.options[u'--profile'] = self.profiles
+
+ if not HAS_COMPOSE:
+ self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
+ to_native(HAS_COMPOSE_EXC))
+
+ if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
+ self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
+ "Upgrade docker-compose to a min version of %s." %
+ (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))
+
+ if self.restarted and self.stopped:
+ self.client.fail("Cannot use restarted and stopped at the same time.")
+
+ self.log("options: ")
+ self.log(self.options, pretty_print=True)
+
+ if self.definition:
+ if not HAS_YAML:
+ self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % to_native(HAS_YAML_EXC))
+
+ if not self.project_name:
+ self.client.fail("Parameter error - project_name required when providing definition.")
+
+ self.project_src = tempfile.mkdtemp(prefix="ansible")
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ try:
+ self.log('writing: ')
+ self.log(yaml.dump(self.definition, default_flow_style=False))
+ with open(compose_file, 'w') as f:
+ f.write(yaml.dump(self.definition, default_flow_style=False))
+ except Exception as exc:
+ self.client.fail("Error writing to %s - %s" % (compose_file, to_native(exc)))
+ else:
+ if not self.project_src:
+ self.client.fail("Parameter error - project_src required.")
+
+ try:
+ self.log("project_src: %s" % self.project_src)
+ self.project = project_from_options(self.project_src, self.options)
+ except Exception as exc:
+ self.client.fail("Configuration error - %s" % to_native(exc))
+
+ def exec_module(self):
+ result = dict()
+
+ if self.state == 'present':
+ result = self.cmd_up()
+ elif self.state == 'absent':
+ result = self.cmd_down()
+
+ if self.definition:
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ self.log("removing %s" % compose_file)
+ os.remove(compose_file)
+ self.log("removing %s" % self.project_src)
+ os.rmdir(self.project_src)
+
+ if not self.check_mode and not self.debug and result.get('actions'):
+ result.pop('actions')
+
+ return result
+
+ def _get_auth_options(self):
+ options = dict()
+ for key, value in self.client.auth_params.items():
+ if value is not None:
+ option = AUTH_PARAM_MAPPING.get(key)
+ if option:
+ options[option] = value
+ return options
+
+ def cmd_up(self):
+
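+        # Outline of the flow below: (1) optionally pull and/or build images,
+        # (2) compute a convergence plan per service to detect required
+        # changes (this also drives check mode and the 'actions' return
+        # value), (3) call project.up() when changes are needed, (4) apply
+        # the stopped/restarted/scale post-processing, and (5) collect
+        # per-container facts for the 'services' return value.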
+ start_deps = self.dependencies
+ service_names = self.services
+ detached = True
+ result = dict(changed=False, actions=[], services=dict())
+
+ up_options = {
+ u'--no-recreate': False,
+ u'--build': False,
+ u'--no-build': False,
+ u'--no-deps': False,
+ u'--force-recreate': False,
+ }
+
+ if self.recreate == 'never':
+ up_options[u'--no-recreate'] = True
+ elif self.recreate == 'always':
+ up_options[u'--force-recreate'] = True
+
+ if self.remove_orphans:
+ up_options[u'--remove-orphans'] = True
+
+ converge = convergence_strategy_from_opts(up_options)
+ self.log("convergence strategy: %s" % converge)
+
+ if self.pull:
+ pull_output = self.cmd_pull()
+ result['changed'] |= pull_output['changed']
+ result['actions'] += pull_output['actions']
+
+ if self.build:
+ build_output = self.cmd_build()
+ result['changed'] |= build_output['changed']
+ result['actions'] += build_output['actions']
+
+ if self.remove_orphans:
+ containers = self.client.containers(
+ filters={
+ 'label': [
+ '{0}={1}'.format(LABEL_PROJECT, self.project.name),
+ '{0}={1}'.format(LABEL_ONE_OFF, "False")
+ ],
+ }
+ )
+
+ orphans = []
+ for container in containers:
+ service_name = container.get('Labels', {}).get(LABEL_SERVICE)
+ if service_name not in self.project.service_names:
+ orphans.append(service_name)
+
+ if orphans:
+ result['changed'] = True
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ plan = service.convergence_plan(strategy=converge)
+ if plan.action == 'start' and self.stopped:
+ # In case the only action is starting, and the user requested
+ # that the service should be stopped, ignore this service.
+ continue
+ if not self._service_profile_enabled(service):
+ continue
+ if plan.action != 'noop':
+ result['changed'] = True
+ result_action = dict(service=service.name)
+ result_action[plan.action] = []
+ for container in plan.containers:
+ result_action[plan.action].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id,
+ ))
+ result['actions'].append(result_action)
+
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ do_build = build_action_from_opts(up_options)
+ self.log('Setting do_build to %s' % do_build)
+ up_kwargs = {
+ 'service_names': service_names,
+ 'start_deps': start_deps,
+ 'strategy': converge,
+ 'do_build': do_build,
+ 'detached': detached,
+ 'remove_orphans': self.remove_orphans,
+ 'timeout': self.timeout,
+ }
+
+ if LooseVersion(compose_version) >= LooseVersion('1.17.0'):
+ up_kwargs['start'] = not self.stopped
+ elif self.stopped:
+ self.client.module.warn(
+ "The 'stopped' option requires docker-compose version >= 1.17.0. " +
+ "This task was run with docker-compose version %s." % compose_version
+ )
+
+ self.project.up(**up_kwargs)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error starting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if self.stopped:
+ stop_output = self.cmd_stop(service_names)
+ result['changed'] |= stop_output['changed']
+ result['actions'] += stop_output['actions']
+
+ if self.restarted:
+ restart_output = self.cmd_restart(service_names)
+ result['changed'] |= restart_output['changed']
+ result['actions'] += restart_output['actions']
+
+ if self.scale:
+ scale_output = self.cmd_scale()
+ result['changed'] |= scale_output['changed']
+ result['actions'] += scale_output['actions']
+
+ for service in self.project.services:
+ service_facts = dict()
+ result['services'][service.name] = service_facts
+ for container in service.containers(stopped=True):
+ inspection = container.inspect()
+ # pare down the inspection data to the most useful bits
+ facts = dict(
+ cmd=[],
+ labels=dict(),
+ image=None,
+ state=dict(
+ running=None,
+ status=None
+ ),
+ networks=dict()
+ )
+ if inspection['Config'].get('Cmd', None) is not None:
+ facts['cmd'] = inspection['Config']['Cmd']
+ if inspection['Config'].get('Labels', None) is not None:
+ facts['labels'] = inspection['Config']['Labels']
+ if inspection['Config'].get('Image', None) is not None:
+ facts['image'] = inspection['Config']['Image']
+ if inspection['State'].get('Running', None) is not None:
+ facts['state']['running'] = inspection['State']['Running']
+ if inspection['State'].get('Status', None) is not None:
+ facts['state']['status'] = inspection['State']['Status']
+
+ if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
+ networks = inspection['NetworkSettings']['Networks']
+ for key in networks:
+ facts['networks'][key] = dict(
+ aliases=[],
+ globalIPv6=None,
+ globalIPv6PrefixLen=0,
+ IPAddress=None,
+ IPPrefixLen=0,
+ links=None,
+ macAddress=None,
+ )
+ if networks[key].get('Aliases', None) is not None:
+ facts['networks'][key]['aliases'] = networks[key]['Aliases']
+ if networks[key].get('GlobalIPv6Address', None) is not None:
+ facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
+ if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
+ facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
+ if networks[key].get('IPAddress', None) is not None:
+ facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
+ if networks[key].get('IPPrefixLen', None) is not None:
+ facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
+ if networks[key].get('Links', None) is not None:
+ facts['networks'][key]['links'] = networks[key]['Links']
+ if networks[key].get('MacAddress', None) is not None:
+ facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
+
+ service_facts[container.name] = facts
+
+ return result
+
+ def cmd_pull(self):
+ result = dict(
+ changed=False,
+ actions=[],
+ )
+
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if 'image' not in service.options:
+ continue
+
+ self.log('Pulling image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % to_native(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # pull the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.pull(ignore_pull_failures=False)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: pull failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ # store the new image ID
+ new_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ new_image_id = image['Id']
+ except NoSuchImageError as exc:
+ self.client.fail("Error: service image lookup failed after pull - %s" % to_native(exc))
+
+ if new_image_id != old_image_id:
+ # if a new image was pulled
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ pulled_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_build(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if service.can_be_built():
+ self.log('Building image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % to_native(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # build the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: build failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if new_image_id not in old_image_id:
+ # if a new image was built
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ built_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def _service_profile_enabled(self, service):
+ """Returns `True` if the service has no profiles defined or has a profile which is among
+ the profiles passed to the `docker compose up` command. Otherwise returns `False`.
+ """
+ if LooseVersion(compose_version) < LooseVersion('1.28.0'):
+ return True
+ return service.enabled_for_profiles(self.profiles or [])
+
+ def cmd_down(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ containers = service.containers(stopped=True)
+ if len(containers):
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ deleted=[container.name for container in containers]
+ ))
+ if not self.check_mode and result['changed']:
+ image_type = image_type_from_opt('--rmi', self.remove_images)
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.down(image_type, self.remove_volumes, self.remove_orphans)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project - %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_stop(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ stop=[]
+ )
+ for container in service.containers(stopped=False):
+ result['changed'] = True
+ service_res['stop'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.stop(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_restart(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ restart=[]
+ )
+ for container in service.containers(stopped=True):
+ result['changed'] = True
+ service_res['restart'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.restart(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error restarting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_scale(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if service.name in self.scale:
+ service_res = dict(
+ service=service.name,
+ scale=0
+ )
+ containers = service.containers(stopped=True)
+ scale = self.parse_scale(service.name)
+ if len(containers) != scale:
+ result['changed'] = True
+ service_res['scale'] = scale - len(containers)
+ if not self.check_mode:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.scale(scale)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error scaling {0} - %s".format(service.name))
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ result['actions'].append(service_res)
+ return result
+
+ def parse_scale(self, service_name):
+ try:
+ return int(self.scale[service_name])
+ except ValueError:
+ self.client.fail("Error scaling %s - expected int, got %s",
+ service_name, to_native(type(self.scale[service_name])))
+
+
+def main():
+ argument_spec = dict(
+ project_src=dict(type='path'),
+        project_name=dict(type='str'),
+ env_file=dict(type='path'),
+ files=dict(type='list', elements='path'),
+ profiles=dict(type='list', elements='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ definition=dict(type='dict'),
+ hostname_check=dict(type='bool', default=False),
+ recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
+ build=dict(type='bool', default=False),
+ remove_images=dict(type='str', choices=['all', 'local']),
+ remove_volumes=dict(type='bool', default=False),
+ remove_orphans=dict(type='bool', default=False),
+ stopped=dict(type='bool', default=False),
+ restarted=dict(type='bool', default=False),
+ scale=dict(type='dict'),
+ services=dict(type='list', elements='str'),
+ dependencies=dict(type='bool', default=True),
+ pull=dict(type='bool', default=False),
+ nocache=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ timeout=dict(type='int')
+ )
+
+ mutually_exclusive = [
+ ('definition', 'project_src'),
+ ('definition', 'files')
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ )
+
+ try:
+ result = ContainerManager(client).exec_module()
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_config.py b/ansible_collections/community/docker/plugins/modules/docker_config.py
new file mode 100644
index 000000000..9f55e0f0d
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_config.py
@@ -0,0 +1,434 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_config
+
+short_description: Manage Docker configs
+
+description:
+ - Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
+  - Adds to the metadata of new configs an C(ansible_key) label containing a SHA-224 hash of the data, which is then used
+    in future runs to test if a config has changed. If C(ansible_key) is not present, then a config will not be updated
+    unless the I(force) option is set.
+ - Updates to configs are performed by removing the config and creating it again.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_2_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ data:
+ description:
+ - The value of the config.
+ - Mutually exclusive with I(data_src). One of I(data) and I(data_src) is required if I(state=present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: false
+ data_src:
+ description:
+ - The file on the target from which to read the config.
+ - Mutually exclusive with I(data). One of I(data) and I(data_src) is required if I(state=present).
+ type: path
+ version_added: 1.10.0
+ labels:
+ description:
+ - "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string."
+ - If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+      - Use with I(state) C(present) to always remove and recreate an existing config.
+ - If C(true), an existing config will be replaced, even if it has not been changed.
+ type: bool
+ default: false
+ rolling_versions:
+ description:
+ - If set to C(true), configs are created with an increasing version number appended to their name.
+ - Adds a label containing the version number to the managed configs with the name C(ansible_version).
+ type: bool
+ default: false
+ version_added: 2.2.0
+ versions_to_keep:
+ description:
+ - When using I(rolling_versions), the number of old versions of the config to keep.
+ - Extraneous old configs are deleted after the new one is created.
+ - Set to C(-1) to keep everything or to C(0) or C(1) to keep only the current one.
+ type: int
+ default: 5
+ version_added: 2.2.0
+ name:
+ description:
+ - The name of the config.
+ type: str
+ required: true
+ state:
+ description:
+ - Set to C(present), if the config should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ template_driver:
+ description:
+ - Set to C(golang) to use a Go template in I(data) or a Go template file in I(data_src).
+ type: str
+ choices:
+ - golang
+ version_added: 2.5.0
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
+ - "Docker API >= 1.30"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - John Hu (@ushuz)
+'''
+
+EXAMPLES = '''
+
+- name: Create config foo (from a file on the control machine)
+ community.docker.docker_config:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+    # file directly after reading it prevents this from happening.
+ data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Create config foo (from a file on the target machine)
+ community.docker.docker_config:
+ name: foo
+ data_src: /path/to/config/file
+ state: present
+
+- name: Change the config data
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the config
+ two: '2'
+ state: present
+
+- name: No change
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing config
+ state: present
+
+- name: Update an existing label
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the config
+ one: '1'
+ state: present
+
+- name: Force the (re-)creation of the config
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ force: true
+ state: present
+
+- name: Remove config foo
+ community.docker.docker_config:
+ name: foo
+ state: absent
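+
+# Illustrative sketches; the config name and the template path are placeholders.
+- name: Create config foo with rolling versions (creates foo_v1, foo_v2, ...)
+  community.docker.docker_config:
+    name: foo
+    data: Goodnight everyone!
+    rolling_versions: true
+    # keep at most the three most recent versions
+    versions_to_keep: 3
+    state: present
+
+- name: Create config foo from a Go template file on the target
+  community.docker.docker_config:
+    name: foo
+    data_src: /path/to/config/template
+    template_driver: golang
+    state: present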
+'''
+
+RETURN = '''
+config_id:
+ description:
+ - The ID assigned by Docker to the config object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+config_name:
+ description:
+ - The name of the created config object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'awesome_config'
+ version_added: 2.2.0
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ compare_generic,
+)
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+
+
+class ConfigManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ConfigManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ data_src = parameters.get('data_src')
+ if data_src is not None:
+ try:
+ with open(data_src, 'rb') as f:
+ self.data = f.read()
+ except Exception as exc:
+ self.client.fail('Error while reading {src}: {error}'.format(src=data_src, error=to_native(exc)))
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.rolling_versions = parameters.get('rolling_versions')
+ self.versions_to_keep = parameters.get('versions_to_keep')
+ self.template_driver = parameters.get('template_driver')
+
+ if self.rolling_versions:
+ self.version = 0
+ self.data_key = None
+ self.configs = []
+
+ def __call__(self):
+ self.get_config()
+ if self.state == 'present':
+ self.data_key = hashlib.sha224(self.data).hexdigest()
+ self.present()
+ self.remove_old_versions()
+ elif self.state == 'absent':
+ self.absent()
+
+ def get_version(self, config):
+ try:
+ return int(config.get('Spec', {}).get('Labels', {}).get('ansible_version', 0))
+ except ValueError:
+ return 0
+
+ def remove_old_versions(self):
+ if not self.rolling_versions or self.versions_to_keep < 0:
+ return
+ if not self.check_mode:
+ while len(self.configs) > max(self.versions_to_keep, 1):
+ self.remove_config(self.configs.pop(0))
+
+ def get_config(self):
+ ''' Find an existing config. '''
+ try:
+ configs = self.client.configs(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
+
+ if self.rolling_versions:
+ self.configs = [
+ config
+ for config in configs
+ if config['Spec']['Name'].startswith('{name}_v'.format(name=self.name))
+ ]
+ self.configs.sort(key=self.get_version)
+ else:
+ self.configs = [
+ config for config in configs if config['Spec']['Name'] == self.name
+ ]
+
+ def create_config(self):
+ ''' Create a new config '''
+ config_id = None
+        # We cannot see the data after creation, so add a label we can use for the idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.rolling_versions:
+ self.version += 1
+ labels['ansible_version'] = str(self.version)
+ self.name = '{name}_v{version}'.format(name=self.name, version=self.version)
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ # only use templating argument when self.template_driver is defined
+ kwargs = {}
+ if self.template_driver:
+ kwargs['templating'] = {
+ 'name': self.template_driver
+ }
+ config_id = self.client.create_config(self.name, self.data, labels=labels, **kwargs)
+ self.configs += self.client.configs(filters={'id': config_id})
+ except APIError as exc:
+ self.client.fail("Error creating config: %s" % to_native(exc))
+
+ if isinstance(config_id, dict):
+ config_id = config_id['ID']
+
+ return config_id
+
+ def remove_config(self, config):
+ try:
+ if not self.check_mode:
+ self.client.remove_config(config['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing config %s: %s" % (config['Spec']['Name'], to_native(exc)))
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the config '''
+ if self.configs:
+ config = self.configs[-1]
+ self.results['config_id'] = config['ID']
+ self.results['config_name'] = config['Spec']['Name']
+ data_changed = False
+ template_driver_changed = False
+ attrs = config.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ else:
+ if not self.force:
+ self.client.module.warn("'ansible_key' label not found. Config will not be changed unless the force parameter is set to 'true'")
+ # template_driver has changed if it was set in the previous config
+ # and now it differs, or if it wasn't set but now it is.
+ if attrs.get('Templating', {}).get('Name'):
+ if attrs['Templating']['Name'] != self.template_driver:
+ template_driver_changed = True
+ elif self.template_driver:
+ template_driver_changed = True
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if self.rolling_versions:
+ self.version = self.get_version(config)
+ if data_changed or template_driver_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the config
+ if not self.rolling_versions:
+ self.absent()
+ config_id = self.create_config()
+ self.results['changed'] = True
+ self.results['config_id'] = config_id
+ self.results['config_name'] = self.name
+ else:
+ self.results['changed'] = True
+ self.results['config_id'] = self.create_config()
+ self.results['config_name'] = self.name
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the config '''
+ if self.configs:
+ for config in self.configs:
+ self.remove_config(config)
+ self.results['changed'] = True
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str'),
+ data_is_b64=dict(type='bool', default=False),
+ data_src=dict(type='path'),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False),
+ rolling_versions=dict(type='bool', default=False),
+ versions_to_keep=dict(type='int', default=5),
+ template_driver=dict(type='str', choices=['golang']),
+ )
+
+ required_if = [
+ ('state', 'present', ['data', 'data_src'], True),
+ ]
+
+ mutually_exclusive = [
+ ('data', 'data_src'),
+ ]
+
+ option_minimal_versions = dict(
+ template_driver=dict(docker_py_version='5.0.3', docker_api_version='1.37'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ mutually_exclusive=mutually_exclusive,
+ min_docker_version='2.6.0',
+ min_docker_api_version='1.30',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ ConfigManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_container.py b/ansible_collections/community/docker/plugins/modules/docker_container.py
new file mode 100644
index 000000000..9d1ed416e
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_container.py
@@ -0,0 +1,1288 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container
+
+short_description: Manage Docker containers
+
+description:
+ - Manage the life cycle of Docker containers.
+ - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken.
+
+notes:
+ - For most config changes, the container needs to be recreated. This means that the existing container has to be destroyed and
+ a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to
+ prevent this.
+ - If the module needs to recreate the container, it will only use the options provided to the module to create the
+ new container (except I(image)). Therefore, always specify B(all) options relevant to the container.
+ - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - When trying to pull an image, the module assumes this is always changed in check mode.
+ diff_mode:
+ support: full
+
+options:
+ auto_remove:
+ description:
+      - Enable auto-removal of the container on the daemon side when the container's process exits.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ blkio_weight:
+ description:
+ - Block IO (relative weight), between 10 and 1000.
+ type: int
+ capabilities:
+ description:
+ - List of capabilities to add to the container.
+ - This is equivalent to C(docker run --cap-add), or the docker-compose option C(cap_add).
+ type: list
+ elements: str
+ cap_drop:
+ description:
+ - List of capabilities to drop from the container.
+ type: list
+ elements: str
+ cgroupns_mode:
+ description:
+ - Specify the cgroup namespace mode for the container.
+ - The Docker CLI calls this simply C(cgroupns).
+ type: str
+ choices:
+ - host
+ - private
+ version_added: 3.0.0
+ cgroup_parent:
+ description:
+ - Specify the parent cgroup for the container.
+ type: str
+ version_added: 1.1.0
+ cleanup:
+ description:
+ - Use with I(detach=false) to remove the container after successful execution.
+ type: bool
+ default: false
+ command:
+ description:
+ - Command to execute when the container starts. A command may be either a string or a list.
+ - Prior to version 2.4, strings were split on commas.
+ - See I(command_handling) for differences in how strings and lists are handled.
+ type: raw
+ comparisons:
+ description:
+      - Allows specifying how properties of existing containers are compared with
+ module options to decide whether the container should be recreated / updated
+ or not.
+ - Only options which correspond to the state of a container as handled by the
+ Docker daemon can be specified, as well as I(networks).
+ - Must be a dictionary specifying for an option one of the keys C(strict), C(ignore)
+ and C(allow_more_present).
+ - If C(strict) is specified, values are tested for equality, and changes always
+ result in updating or restarting. If C(ignore) is specified, changes are ignored.
+ - C(allow_more_present) is allowed only for lists, sets and dicts. If it is
+ specified for lists or sets, the container will only be updated or restarted if
+ the module option contains a value which is not present in the container's
+ options. If the option is specified for a dict, the container will only be updated
+ or restarted if the module option contains a key which is not present in the
+ container's option, or if the value of a key present differs.
+ - The wildcard option C(*) can be used to set one of the default values C(strict)
+ or C(ignore) to I(all) comparisons which are not explicitly set to other values.
+ - See the examples for details.
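+      - "For example, C({'image': 'ignore', 'networks': 'allow_more_present', '*': 'strict'}) ignores image changes, allows
+        the container to be connected to additional networks, and compares all other supported options strictly."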
+ type: dict
+ container_default_behavior:
+ description:
+ - In older versions of this module, various module options used to have default values.
+ This caused problems with containers which use different values for these options.
+ - The default value is now C(no_defaults). To restore the old behavior, set it to
+ C(compatibility), which will ensure that the default values are used when the values
+ are not explicitly specified by the user.
+ - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory),
+ I(paused), I(privileged), I(read_only) and I(tty) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ default: no_defaults
+ command_handling:
+ description:
+ - The default behavior for I(command) (when provided as a list) and I(entrypoint) is to
+ convert them to strings without considering shell quoting rules. (For comparing idempotency,
+ the resulting string is split considering shell quoting rules.)
+      - Also, setting I(command) to an empty list or an empty string, and setting I(entrypoint) to an empty
+        list, will be handled as if these options are not specified. This is different from idempotency
+ handling for other container-config related options.
+ - When this is set to C(compatibility), which was the default until community.docker 3.0.0, the
+ current behavior will be kept.
+ - When this is set to C(correct), these options are kept as lists, and an empty value or empty
+ list will be handled correctly for idempotency checks. This has been the default since
+ community.docker 3.0.0.
+ type: str
+ choices:
+ - compatibility
+ - correct
+ version_added: 1.9.0
+ default: correct
+ cpu_period:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) period.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpu_quota:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) quota.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpus:
+ description:
+ - Specify how much of the available CPU resources a container can use.
+ - A value of C(1.5) means that at most one and a half CPU (core) will be used.
+ type: float
+ cpuset_cpus:
+ description:
+ - CPUs in which to allow execution C(1,3) or C(1-3).
+ type: str
+ cpuset_mems:
+ description:
+ - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1).
+ type: str
+ cpu_shares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ default_host_ip:
+ description:
+ - Define the default host IP to use.
+ - Must be an empty string, an IPv4 address, or an IPv6 address.
+      - With Docker 20.10.2 or newer, this should be set to an empty string (C("")) to prevent
+        port bindings without an explicit IP address from binding only to IPv4.
+ See U(https://github.com/ansible-collections/community.docker/issues/70) for details.
+ - By default, the module will try to auto-detect this value from the C(bridge) network's
+ C(com.docker.network.bridge.host_binding_ipv4) option. If it cannot auto-detect it, it
+ will fall back to C(0.0.0.0).
+ type: str
+ version_added: 1.2.0
+ detach:
+ description:
+ - Enable detached mode to leave the container running in background.
+ - If disabled, the task will reflect the status of the container run (failed if the command failed).
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(true).
+ type: bool
+ devices:
+ description:
+ - List of host device bindings to add to the container.
+ - "Each binding is a mapping expressed in the format C(<path_on_host>:<path_in_container>:<cgroup_permissions>)."
+ type: list
+ elements: str
+ device_read_bps:
+ description:
+ - "List of device path and read rate (bytes per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: true
+ rate:
+ description:
+ - "Device read limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: true
+ device_write_bps:
+ description:
+ - "List of device and write rate (bytes per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: true
+ rate:
+ description:
+ - "Device read limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: true
+ device_read_iops:
+ description:
+ - "List of device and read rate (IO per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: true
+ rate:
+ description:
+ - "Device read limit."
+ - "Must be a positive integer."
+ type: int
+ required: true
+ device_write_iops:
+ description:
+ - "List of device and write rate (IO per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: true
+ rate:
+ description:
+ - "Device read limit."
+ - "Must be a positive integer."
+ type: int
+ required: true
+ device_requests:
+ description:
+      - Allows requesting additional resources, such as GPUs.
+ type: list
+ elements: dict
+ suboptions:
+ capabilities:
+ description:
+ - List of lists of strings to request capabilities.
+ - The top-level list entries are combined by OR, and for every list entry,
+ the entries in the list it contains are combined by AND.
+ - The driver tries to satisfy one of the sub-lists.
+ - Available capabilities for the C(nvidia) driver can be found at
+ U(https://github.com/NVIDIA/nvidia-container-runtime).
+ type: list
+ elements: list
+ count:
+ description:
+          - Number of devices to request.
+ - Set to C(-1) to request all available devices.
+ type: int
+ device_ids:
+ description:
+ - List of device IDs.
+ type: list
+ elements: str
+ driver:
+ description:
+ - Which driver to use for this device.
+ type: str
+ options:
+ description:
+ - Driver-specific options.
+ type: dict
+ version_added: 0.1.0
+ dns_opts:
+ description:
+ - List of DNS options.
+ type: list
+ elements: str
+ dns_servers:
+ description:
+ - List of custom DNS servers.
+ type: list
+ elements: str
+ dns_search_domains:
+ description:
+ - List of custom DNS search domains.
+ type: list
+ elements: str
+ domainname:
+ description:
+ - Container domainname.
+ type: str
+ env:
+ description:
+ - Dictionary of key,value pairs.
+ - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example C("true")) in order to avoid data loss.
+      - Please note that if you are passing values in with Jinja2 templates, like C("{{ value }}"), you need to add C(| string) to prevent Ansible from
+        converting strings such as C("true") back to booleans. The correct way is to use C("{{ value | string }}").
+ type: dict
+ env_file:
+ description:
+ - Path to a file, present on the target, containing environment variables I(FOO=BAR).
+      - If a variable is also present in I(env), the I(env) value will take precedence.
+ type: path
+ entrypoint:
+ description:
+ - Command that overwrites the default C(ENTRYPOINT) of the image.
+ - See I(command_handling) for differences in how strings and lists are handled.
+ type: list
+ elements: str
+ etc_hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's C(/etc/hosts) file.
+ type: dict
+ exposed_ports:
+ description:
+ - List of additional container ports which informs Docker that the container
+ listens on the specified network ports at runtime.
+ - If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not
+ need to be exposed again.
+ type: list
+ elements: str
+ aliases:
+ - exposed
+ - expose
+ force_kill:
+ description:
+ - Use the kill command when stopping a running container.
+ type: bool
+ default: false
+ aliases:
+ - forcekill
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work."
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+        that looks like C(5h34m56s) or C(1m30s). The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ retries:
+ description:
+      - Number of consecutive failures needed to report unhealthy.
+ - The default used by the Docker daemon is C(3).
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ - The default used by the Docker daemon is C(0s).
+ type: str
+ hostname:
+ description:
+ - The container's hostname.
+ type: str
+ ignore_image:
+ description:
+      - When I(state) is C(present) or C(started), the module compares the configuration of an existing
+        container to the requested configuration. The evaluation includes the image version. If the image
+ version in the registry does not match the container, the container will be recreated. You can
+ stop this behavior by setting I(ignore_image) to C(true).
+ - "B(Warning:) This option is ignored if C(image: ignore) or C(*: ignore) is specified in the
+ I(comparisons) option."
+ - "This option is deprecated since community.docker 3.2.0 and will be removed in community.docker 4.0.0.
+ Use C(image: ignore) in I(comparisons) instead of I(ignore_image=true)."
+ type: bool
+ default: false
+ image:
+ description:
+      - Repository path and tag used to create the container. If an image is not found or I(pull) is C(true), the image
+ will be pulled from the registry. If no tag is included, C(latest) will be used.
+ - Can also be an image ID. If this is the case, the image is assumed to be available locally.
+ The I(pull) option is ignored for this case.
+ type: str
+ image_comparison:
+ description:
+ - Determines which image to use for idempotency checks that depend on image parameters.
+ - The default, C(desired-image), will use the image that is provided to the module via the I(image) parameter.
+ - C(current-image) will use the image that the container is currently using, if the container exists. It
+ falls back to the image that is provided in case the container does not yet exist.
+ - This affects the I(env), I(env_file), I(exposed_ports), I(labels), and I(volumes) options.
+ type: str
+ choices:
+ - desired-image
+ - current-image
+ default: desired-image
+ version_added: 3.0.0
+ image_label_mismatch:
+ description:
+ - How to handle labels inherited from the image that are not set explicitly.
+ - When C(ignore), labels that are present in the image but not specified in I(labels) will be
+ ignored. This is useful to avoid having to specify the image labels in I(labels) while keeping
+ labels I(comparisons) C(strict).
+ - When C(fail), if there are labels present in the image which are not set from I(labels), the
+ module will fail. This prevents introducing unexpected labels from the base image.
+ - "B(Warning:) This option is ignored unless C(labels: strict) or C(*: strict) is specified in
+ the I(comparisons) option."
+ type: str
+ choices:
+ - 'ignore'
+ - 'fail'
+ default: ignore
+ version_added: 2.6.0
+ image_name_mismatch:
+ description:
+ - Determines what the module does if the image matches, but the image name in the container's configuration
+ does not match the image name provided to the module.
+ - "This is ignored if C(image: ignore) is set in I(comparisons)."
+ - If set to C(recreate) the container will be recreated.
+ - If set to C(ignore) the container will not be recreated because of this. It might still get recreated for other reasons.
+ This has been the default behavior of the module for a long time, but might not be what users expect.
+ type: str
+ choices:
+ - recreate
+ - ignore
+ default: ignore
+ version_added: 3.2.0
+ init:
+ description:
+ - Run an init inside the container that forwards signals and reaps processes.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ interactive:
+ description:
+ - Keep stdin open after a container is launched, even if not attached.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ ipc_mode:
+ description:
+ - Set the IPC mode for the container.
+ - Can be one of C(container:<name|id>) to reuse another container's IPC namespace or C(host) to use
+ the host's IPC namespace within the container.
+ type: str
+ keep_volumes:
+ description:
+ - Retain anonymous volumes associated with a removed container.
+ type: bool
+ default: true
+ kill_signal:
+ description:
+ - Override default signal used to kill a running container.
+ type: str
+ kernel_memory:
+ description:
+ - "Kernel memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ type: dict
+ links:
+ description:
+ - List of name aliases for linked containers in the format C(container_name:alias).
+      - Setting this will force the container to be restarted.
+ type: list
+ elements: str
+ log_driver:
+ description:
+ - Specify the logging driver. Docker uses C(json-file) by default.
+ - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices.
+ type: str
+ log_options:
+ description:
+ - Dictionary of options specific to the chosen I(log_driver).
+ - See U(https://docs.docker.com/engine/admin/logging/overview/) for details.
+ - I(log_driver) needs to be specified for I(log_options) to take effect, even if using the default C(json-file) driver.
+ type: dict
+ aliases:
+ - log_opt
+ mac_address:
+ description:
+ - Container MAC address (for example, C(92:d0:c6:0a:29:33)).
+ type: str
+ memory:
+ description:
+ - "Memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C("0").
+ type: str
+ memory_reservation:
+ description:
+ - "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swap:
+ description:
+ - "Total memory limit (memory + swap) in format C(<number>[<unit>]), or
+ the special values C(unlimited) or C(-1) for unlimited swap usage.
+ Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B),
+ C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swappiness:
+ description:
+ - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+      - If not set, the value will remain the same if the container exists, and will be inherited
+        from the host machine if it is (re-)created.
+ type: int
+ mounts:
+ type: list
+ elements: dict
+ description:
+ - Specification for mounts to be added to the container. More powerful alternative to I(volumes).
+ suboptions:
+ target:
+ description:
+ - Path inside the container.
+ type: str
+ required: true
+ source:
+ description:
+ - Mount source.
+ - For example, this can be a volume name or a host path.
+ - If not supplied when I(type=volume) an anonymous volume will be created.
+ type: str
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows.
+ type: str
+ choices:
+ - bind
+ - npipe
+ - tmpfs
+ - volume
+ default: volume
+ read_only:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ consistency:
+ description:
+ - The consistency requirement for the mount.
+ type: str
+ choices:
+ - cached
+ - consistent
+ - default
+ - delegated
+ propagation:
+ description:
+ - Propagation mode. Only valid for the C(bind) type.
+ type: str
+ choices:
+ - private
+ - rprivate
+ - shared
+ - rshared
+ - slave
+ - rslave
+ no_copy:
+ description:
+ - False if the volume should be populated with the data from the target. Only valid for the C(volume) type.
+ - The default value is C(false).
+ type: bool
+ labels:
+ description:
+ - User-defined name and labels for the volume. Only valid for the C(volume) type.
+ type: dict
+ volume_driver:
+ description:
+ - Specify the volume driver. Only valid for the C(volume) type.
+ - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: str
+ volume_options:
+ description:
+ - Dictionary of options specific to the chosen volume_driver. See
+ L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: dict
+ tmpfs_size:
+ description:
+ - "The size for the tmpfs mount in bytes in format <number>[<unit>]."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ tmpfs_mode:
+ description:
+ - The permission mode for the tmpfs mount.
+ type: str
+ name:
+ description:
+ - Assign a name to a new container or match an existing container.
+      - When identifying an existing container, I(name) may be a name or a long or short container ID.
+ type: str
+ required: true
+ network_mode:
+ description:
+ - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default).
+ - "Since community.docker 2.0.0, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network,
+ the default value for I(network_mode) is the name of the first network in the I(networks) list. You can prevent this
+ by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if
+ I(network_mode) is not specified."
+ type: str
+ userns_mode:
+ description:
+      - Set the user namespace mode for the container. Currently, the only valid values are C(host) and the empty string.
+ type: str
+ networks:
+ description:
+ - List of networks the container belongs to.
+ - For examples of the data structure and usage see EXAMPLES below.
+ - "To remove a container from one or more networks, use C(networks: strict) in the I(comparisons) option."
+ - "If I(networks_cli_compatible) is set to C(false), this will not remove the default network if I(networks) is specified.
+ This is different from the behavior of C(docker run ...). You need to explicitly use C(networks: strict) in I(comparisons)
+ to enforce the removal of the default network (and all other networks not explicitly mentioned in I(networks)) in that case."
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The network's name.
+ type: str
+ required: true
+ ipv4_address:
+ description:
+ - The container's IPv4 address in this network.
+ type: str
+ ipv6_address:
+ description:
+ - The container's IPv6 address in this network.
+ type: str
+ links:
+ description:
+ - A list of containers to link to.
+ type: list
+ elements: str
+ aliases:
+ description:
+ - List of aliases for this container in this network. These names
+ can be used in the network to reach this container.
+ type: list
+ elements: str
+ networks_cli_compatible:
+ description:
+ - "If I(networks_cli_compatible) is set to C(true) (default), this module will behave as
+ C(docker run --network) and will B(not) add the default network if I(networks) is
+ specified. If I(networks) is not specified, the default network will be attached."
+ - "When I(networks_cli_compatible) is set to C(false) and networks are provided to the module
+ via the I(networks) option, the module behaves differently than C(docker run --network):
+ C(docker run --network other) will create a container with network C(other) attached,
+ but the default network not attached. This module with I(networks: {name: other}) will
+ create a container with both C(default) and C(other) attached. If C(networks: strict)
+ or C(*: strict) is set in I(comparisons), the C(default) network will be removed afterwards."
+ type: bool
+ default: true
+ oom_killer:
+ description:
+ - Whether or not to disable OOM Killer for the container.
+ type: bool
+ oom_score_adj:
+ description:
+ - An integer value containing the score given to the container in order to tune
+ OOM killer preferences.
+ type: int
+ output_logs:
+ description:
+      - If set to C(true), output of the container command will be printed.
+ - Only effective when I(log_driver) is set to C(json-file), C(journald), or C(local).
+ type: bool
+ default: false
+ paused:
+ description:
+ - Use with the started state to pause running processes inside the container.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ pid_mode:
+ description:
+ - Set the PID namespace mode for the container.
+ type: str
+ pids_limit:
+ description:
+ - Set PIDs limit for the container. It accepts an integer value.
+ - Set C(-1) for unlimited PIDs.
+ type: int
+ platform:
+ description:
+ - Platform for the container in the format C(os[/arch[/variant]]).
+ - "Please note that inspecting the container does not always return the exact platform string used to
+ create the container. This can cause idempotency to break for this module. Use the I(comparisons) option
+ with C(platform: ignore) to prevent accidental recreation of the container due to this."
+ type: str
+ version_added: 3.0.0
+ privileged:
+ description:
+ - Give extended privileges to the container.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ publish_all_ports:
+ description:
+ - Publish all ports to the host.
+ - Any specified port bindings from I(published_ports) will remain intact when C(true).
+ type: bool
+ version_added: 1.8.0
+ published_ports:
+ description:
+ - List of ports to publish from the container to the host.
+ - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
+ container port, 9000 is a host port, and 0.0.0.0 is a host interface."
+ - Port ranges can be used for source and destination ports. If two ranges with
+ different lengths are specified, the shorter range will be used.
+ Since community.general 0.2.0, if the source port range has length 1, the port will not be assigned
+ to the first port of the destination range, but to a free port in that range. This is the
+ same behavior as for C(docker) command line utility.
+ - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are B(not) allowed. This
+ is different from the C(docker) command line utility. Use the R(dig lookup,ansible_collections.community.general.dig_lookup)
+ to resolve hostnames."
+      - If the I(networks) parameter is provided, the module will inspect each network to see if there exists
+ a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4).
+ If such a network is found, then published ports where no host IP address is specified
+ will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4).
+ Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4)
+ value encountered in the list of I(networks) is the one that will be used.
+ - The value C(all) was allowed in earlier versions of this module. Support for it was removed in
+ community.docker 3.0.0. Use the I(publish_all_ports) option instead.
+ type: list
+ elements: str
+ aliases:
+ - ports
+ pull:
+ description:
+      - If C(true), always pull the latest version of an image. Otherwise, the image will only be pulled
+        when missing.
+      - "B(Note:) images are only pulled when specified by name. If the image is specified
+        as an image ID (hash), it cannot be pulled."
+ type: bool
+ default: false
+ purge_networks:
+ description:
+      - Remove the container from ALL networks not included in the I(networks) parameter.
+ - Any default networks such as C(bridge), if not found in I(networks), will be removed as well.
+ - "This option is deprecated since community.docker 3.2.0 and will be removed in community.docker 4.0.0.
+ Use C(networks: strict) in I(comparisons) instead of I(purge_networks=true) and make sure that
+ I(networks) is specified. If you want to remove all networks, specify I(networks: [])."
+ type: bool
+ default: false
+ read_only:
+ description:
+ - Mount the container's root file system as read-only.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ recreate:
+ description:
+ - Use with present and started states to force the re-creation of an existing container.
+ type: bool
+ default: false
+ removal_wait_timeout:
+ description:
+      - When removing an existing container, the docker daemon API call returns after the container
+        is scheduled for removal. Removal is usually very fast, but it can happen that during high I/O
+        load, removal can take longer. By default, the module will wait until the container has been
+        removed before trying to (re-)create it, however long this takes.
+ - By setting this option, the module will wait at most this many seconds for the container to be
+ removed. If the container is still in the removal phase after this many seconds, the module will
+ fail.
+ type: float
+ restart:
+ description:
+ - Use with started state to force a matching container to be stopped and restarted.
+ type: bool
+ default: false
+ restart_policy:
+ description:
+ - Container restart policy.
+      - Place quotes around the C(no) option.
+ type: str
+ choices:
+ - 'no'
+ - 'on-failure'
+ - 'always'
+ - 'unless-stopped'
+ restart_retries:
+ description:
+      - Use with I(restart_policy) to control the maximum number of restart attempts.
+ type: int
+ runtime:
+ description:
+ - Runtime to use for the container.
+ type: str
+ shm_size:
+ description:
+ - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M).
+ type: str
+ security_opts:
+ description:
+ - List of security options in the form of C("label:user:User").
+ type: list
+ elements: str
+ state:
+ description:
+ - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container
+ rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.'
+ - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
+ container matches the name, a container will be created. If a container matches the name but the provided configuration
+ does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
+ with the requested config.'
+ - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running
+ state. Use I(restart) to force a matching container to be stopped and restarted.'
+ - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped
+ state.'
+ - "To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the
+ image version will be taken into account, you can also use the C(image: ignore) in the I(comparisons) option."
+ - Use the I(recreate) option to always force re-creation of a matching container, even if it is running.
+ - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is
+ C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain anonymous volumes associated with a removed container.
+ type: str
+ default: started
+ choices:
+ - absent
+ - present
+ - stopped
+ - started
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ type: str
+ stop_timeout:
+ description:
+ - Number of seconds to wait for the container to stop before sending C(SIGKILL).
+ When the container is created by this module, its C(StopTimeout) configuration
+ will be set to this value.
+      - When the container is stopped, this value will be used as a timeout for stopping the
+        container. In case the container has a custom C(StopTimeout) configuration,
+ the behavior depends on the version of the docker daemon. New versions of
+ the docker daemon will always use the container's configured C(StopTimeout)
+ value if it has been configured.
+ type: int
+ storage_opts:
+ description:
+ - Storage driver options for this container as a key-value mapping.
+ type: dict
+ version_added: 1.3.0
+ tmpfs:
+ description:
+ - Mount a tmpfs directory.
+ type: list
+ elements: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false).
+ type: bool
+ ulimits:
+ description:
+ - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)."
+ type: list
+ elements: str
+ sysctls:
+ description:
+ - Dictionary of key,value pairs.
+ type: dict
+ user:
+ description:
+ - Sets the username or UID used and optionally the groupname or GID for the specified command.
+ - "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)."
+ type: str
+ uts:
+ description:
+ - Set the UTS namespace mode for the container.
+ type: str
+ volumes:
+ description:
+ - List of volumes to mount within the container.
+ - "Use docker CLI-style syntax: C(/host:/container[:mode])"
+ - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent),
+ C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and
+ C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes."
+ - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume.
+ - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw),
+ C(z), and C(Z)."
+ type: list
+ elements: str
+ volume_driver:
+ description:
+ - The container volume driver.
+ type: str
+ volumes_from:
+ description:
+ - List of container names or IDs to get volumes from.
+ type: list
+ elements: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ type: str
+
+author:
+ - "Cove Schneider (@cove)"
+ - "Joshua Conner (@joshuaconner)"
+ - "Pavel Antonov (@softzilla)"
+ - "Thomas Steinbach (@ThomasSteinbach)"
+ - "Philippe Jandot (@zfil)"
+ - "Daan Oosterveld (@dusdanig)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Kassian Sun (@kassiansun)"
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Create a data container
+ community.docker.docker_container:
+ name: mydata
+ image: busybox
+ volumes:
+ - /data
+
+- name: Re-create a redis container
+ community.docker.docker_container:
+ name: myredis
+ image: redis
+ command: redis-server --appendonly yes
+ state: present
+ recreate: true
+ exposed_ports:
+ - 6379
+ volumes_from:
+ - mydata
+
+- name: Restart a container
+ community.docker.docker_container:
+ name: myapplication
+ image: someuser/appimage
+ state: started
+ restart: true
+ links:
+ - "myredis:aliasedredis"
+ devices:
+ - "/dev/sda:/dev/xvda:rwm"
+ ports:
+ # Publish container port 9000 as host port 8080
+ - "8080:9000"
+ # Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1
+ - "127.0.0.1:8081:9001/udp"
+ # Publish container port 9002 as a random host port
+ - "9002"
+ # Publish container port 9003 as a free host port in range 8000-8100
+ # (the host port will be selected by the Docker daemon)
+ - "8000-8100:9003"
+ # Publish container ports 9010-9020 to host ports 7000-7010
+ - "7000-7010:9010-9020"
+ env:
+ SECRET_KEY: "ssssh"
+ # Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted
+ BOOLEAN_KEY: "yes"
+
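+# A minimal sketch of the default_host_ip option documented above: setting it to
+# an empty string lets port bindings without an explicit IP address bind to both
+# IPv4 and IPv6. The container name, image, and ports here are illustrative.
+- name: Publish a port on both IPv4 and IPv6
+  community.docker.docker_container:
+    name: web
+    image: nginx:1.13
+    state: started
+    default_host_ip: ""
+    published_ports:
+      - "8080:80"
+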
+- name: Container present
+ community.docker.docker_container:
+ name: mycontainer
+ state: present
+ image: ubuntu:14.04
+ command: sleep infinity
+
+- name: Stop a container
+ community.docker.docker_container:
+ name: mycontainer
+ state: stopped
+
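+# An illustrative sketch combining the CPU and memory limit options documented
+# above; the image and limit values are placeholders.
+- name: Start a container with CPU and memory limits
+  community.docker.docker_container:
+    name: limited
+    image: ubuntu:18.04
+    command: sleep infinity
+    cpus: 1.5
+    memory: 512M
+    memory_swap: 1G
+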
+- name: Start 4 load-balanced containers
+ community.docker.docker_container:
+ name: "container{{ item }}"
+ recreate: true
+ image: someuser/anotherappimage
+ command: sleep 1d
+ with_sequence: count=4
+
+- name: Remove container
+ community.docker.docker_container:
+ name: ohno
+ state: absent
+
+- name: Syslogging output
+ community.docker.docker_container:
+ name: myservice
+ image: busybox
+ log_driver: syslog
+ log_options:
+ syslog-address: tcp://my-syslog-server:514
+ syslog-facility: daemon
+    # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag".
+    # For older Docker installs, use "syslog-tag" instead.
+ tag: myservice
+
+- name: Create db container and connect to network
+ community.docker.docker_container:
+ name: db_test
+ image: "postgres:latest"
+ networks:
+ - name: "{{ docker_network_name }}"
+
+- name: Start container, connect to network and link
+ community.docker.docker_container:
+ name: sleeper
+ image: ubuntu:14.04
+ networks:
+ - name: TestingNet
+ ipv4_address: "172.16.1.100"
+ aliases:
+ - sleepyzz
+ links:
+ - db_test:db
+ - name: TestingNet2
+
+- name: Start a container with a command
+ community.docker.docker_container:
+ name: sleepy
+ image: ubuntu:14.04
+ command: ["sleep", "infinity"]
+
+- name: Add container to networks
+ community.docker.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ ipv4_address: 172.16.1.18
+ links:
+ - sleeper
+ - name: TestingNet2
+ ipv4_address: 172.16.10.20
+
+- name: Update network with aliases
+ community.docker.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ aliases:
+ - sleepyz
+ - zzzz
+
+- name: Remove container from one network
+ community.docker.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet2
+ comparisons:
+ networks: strict
+
+- name: Remove container from all networks
+ community.docker.docker_container:
+ name: sleepy
+ comparisons:
+ networks: strict
+
+- name: Start a container and use an env file
+ community.docker.docker_container:
+ name: agent
+ image: jenkinsci/ssh-slave
+ env_file: /var/tmp/jenkins/agent.env
+
+- name: Create a container with limited capabilities
+ community.docker.docker_container:
+ name: sleepy
+ image: ubuntu:16.04
+ command: sleep infinity
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+
+- name: Finer container restart/update control
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ volumes:
+ - /tmp:/tmp
+ comparisons:
+ image: ignore # do not restart containers with older versions of the image
+ env: strict # we want precisely this environment
+ volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
+
+- name: Finer container restart/update control II
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ comparisons:
+ '*': ignore # by default, ignore *all* options (including image)
+ env: strict # except for environment variables; there, we want to be strict
+
+- name: Start container with healthstatus
+ community.docker.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or timeouts, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Remove healthcheck from container
+ community.docker.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # The "NONE" check needs to be specified
+ test: ["NONE"]
+
+- name: Create a tmpfs with a size and mode
+ community.docker.docker_container:
+    name: tmpfs-test
+ image: ubuntu:22.04
+ state: started
+ mounts:
+ - type: tmpfs
+ target: /cache
+ tmpfs_mode: "1700" # only readable to the owner
+ tmpfs_size: "16G"
+
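+# A minimal sketch of the mounts option with a bind mount; the host path,
+# target path, and image are placeholders.
+- name: Start a container with a read-only bind mount
+  community.docker.docker_container:
+    name: bindmount-test
+    image: ubuntu:22.04
+    state: started
+    mounts:
+      - type: bind
+        source: /var/log/app
+        target: /logs
+        read_only: true
+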
+- name: Start container with block device read limit
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_read_bps:
+ # Limit read rate for /dev/sda to 20 mebibytes per second
+ - path: /dev/sda
+ rate: 20M
+ device_read_iops:
+ # Limit read rate for /dev/sdb to 300 IO per second
+ - path: /dev/sdb
+ rate: 300
+
+- name: Start container with GPUs
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_requests:
+ - # Add some specific devices to this container
+ device_ids:
+ - '0'
+ - 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+      - # Add NVIDIA GPUs to this container
+ driver: nvidia
+ count: -1 # this means we want all
+ capabilities:
+ # We have one OR condition: 'gpu' AND 'utility'
+ - - gpu
+ - utility
+ # See https://github.com/NVIDIA/nvidia-container-runtime#supported-driver-capabilities
+ # for a list of capabilities supported by the nvidia driver
+
+- name: Start container with storage options
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ storage_opts:
+ # Limit root filesystem to 12 MB - note that this requires special storage backends
+ # (https://fabianlee.org/2020/01/15/docker-use-overlay2-with-an-xfs-backing-filesystem-to-limit-rootfs-size/)
+ size: 12m
+'''
+
+RETURN = '''
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Empty if I(state) is C(absent).
+    - If I(detach=false), will include the C(Output) attribute containing any output from the container run.
+ returned: success; or when I(state=started) and I(detach=false), and when waiting for the container result did not fail
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+status:
+ description:
+ - In case a container is started without detaching, this contains the exit code of the process in the container.
+ - Before community.docker 1.1.0, this was only returned when non-zero.
+ returned: when I(state=started) and I(detach=false), and when waiting for the container result did not fail
+ type: int
+ sample: 0
+'''
+
+from ansible_collections.community.docker.plugins.module_utils.module_container.docker_api import (
+ DockerAPIEngineDriver,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.module_container.module import (
+ run_module,
+)
+
+
+def main():
+ engine_driver = DockerAPIEngineDriver()
+ run_module(engine_driver)
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py b/ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py
new file mode 100644
index 000000000..f140bfe6a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py
@@ -0,0 +1,870 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2022, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_copy_into
+
+short_description: Copy a file into a Docker container
+
+version_added: 3.4.0
+
+description:
+ - Copy a file into a Docker container.
+ - Similar to C(docker cp).
+  - To copy files into a non-running container, you must provide the I(owner_id) and I(group_id) options.
+ This is also necessary if the container does not contain a C(/bin/sh) shell with an C(id) tool.
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+ details:
+ - Additional data will need to be transferred to compute diffs.
+      - The module uses R(the MAX_FILE_SIZE_FOR_DIFF ansible-core configuration,MAX_FILE_SIZE_FOR_DIFF)
+        to determine the maximum file size for which diffs should be computed.
+
+options:
+ container:
+ description:
+ - The name of the container to copy files to.
+ type: str
+ required: true
+ path:
+ description:
+ - Path to a file on the managed node.
+ - Mutually exclusive with I(content). One of I(content) and I(path) is required.
+ type: path
+ content:
+ description:
+ - The file's content.
+ - If you plan to provide binary data, provide it pre-encoded to base64, and set I(content_is_b64=true).
+ - Mutually exclusive with I(path). One of I(content) and I(path) is required.
+ type: str
+ content_is_b64:
+ description:
+ - If set to C(true), the content in I(content) is assumed to be Base64 encoded and
+ will be decoded before being used.
+ - To use binary I(content), it is better to keep it Base64 encoded and let it
+        be decoded by this option. Otherwise you risk the data being interpreted as
+ UTF-8 and corrupted.
+ type: bool
+ default: false
+ container_path:
+ description:
+ - Path to a file inside the Docker container.
+ - Must be an absolute path.
+ type: str
+ required: true
+ follow:
+ description:
+ - This flag indicates that filesystem links in the Docker container, if they exist, should be followed.
+ type: bool
+ default: false
+ local_follow:
+ description:
+ - This flag indicates that filesystem links in the source tree (where the module is executed), if they exist, should be followed.
+ type: bool
+ default: true
+ owner_id:
+ description:
+ - The owner ID to use when writing the file to disk.
+ - If provided, I(group_id) must also be provided.
+ - If not provided, the module will try to determine the user and group ID for the current user in the container.
+ This will only work if C(/bin/sh) is present in the container and the C(id) binary or shell builtin is available.
+ Also the container must be running.
+ type: int
+ group_id:
+ description:
+ - The group ID to use when writing the file to disk.
+ - If provided, I(owner_id) must also be provided.
+ - If not provided, the module will try to determine the user and group ID for the current user in the container.
+ This will only work if C(/bin/sh) is present in the container and the C(id) binary or shell builtin is available.
+ Also the container must be running.
+ type: int
+ mode:
+ description:
+ - The file mode to use when writing the file to disk.
+ - Will use the file's mode from the source system if this option is not provided.
+ type: int
+ force:
+ description:
+ - If set to C(true), force writing the file (without performing any idempotency checks).
+ - If set to C(false), only write the file if it does not exist on the target. If a filesystem object exists at
+        the destination, the module will not make any changes.
+ - If this option is not specified, the module will be idempotent. To verify idempotency, it will try to get information
+ on the filesystem object in the container, and if everything seems to match will download the file from the container
+ to compare it to the file to upload.
+ type: bool
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Copy a file into the container
+ community.docker.docker_container_copy_into:
+ container: mydata
+ path: /home/user/data.txt
+ container_path: /data/input.txt
+
+- name: Copy a file into the container with owner, group, and mode set
+ community.docker.docker_container_copy_into:
+ container: mydata
+ path: /home/user/bin/runme.o
+ container_path: /bin/runme
+    owner_id: 0 # root
+    group_id: 0 # root
+ mode: 0o755 # readable and executable by all users, writable by root
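+
+# A minimal sketch of the content and content_is_b64 options; the variable
+# holding the binary payload is a placeholder.
+- name: Copy binary content into the container
+  community.docker.docker_container_copy_into:
+    container: mydata
+    content: "{{ binary_payload | b64encode }}"
+    content_is_b64: true
+    container_path: /data/blob.bin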
+'''
+
+RETURN = '''
+container_path:
+ description:
+ - The actual path in the container.
+ - Can only be different from I(container_path) when I(follow=true).
+ type: str
+ returned: success
+'''
+
+import base64
+import io
+import os
+import stat
+import traceback
+
+from ansible.module_utils._text import to_bytes, to_native, to_text
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.copy import (
+ DockerFileCopyError,
+ DockerFileNotFound,
+ DockerUnexpectedError,
+ determine_user_group,
+ fetch_file_ex,
+ put_file,
+ put_file_content,
+ stat_file,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._scramble import generate_insecure_key, scramble
+
+
+def are_fileobjs_equal(f1, f2):
+ '''Given two (buffered) file objects, compare their contents.'''
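+    # Both file objects are read in chunks, and each side is buffered
+    # independently, so the comparison also works when the two objects
+    # return reads of different sizes.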
+ blocksize = 65536
+ b1buf = b''
+ b2buf = b''
+ while True:
+ if f1 and len(b1buf) < blocksize:
+ f1b = f1.read(blocksize)
+ if not f1b:
+ # f1 is EOF, so stop reading from it
+ f1 = None
+ b1buf += f1b
+ if f2 and len(b2buf) < blocksize:
+ f2b = f2.read(blocksize)
+ if not f2b:
+ # f2 is EOF, so stop reading from it
+ f2 = None
+ b2buf += f2b
+ if not b1buf or not b2buf:
+ # At least one of f1 and f2 is EOF and all its data has
+ # been processed. If both are EOF and their data has been
+ # processed, the files are equal, otherwise not.
+ return not b1buf and not b2buf
+ # Compare the next chunk of data, and remove it from the buffers
+ buflen = min(len(b1buf), len(b2buf))
+ if b1buf[:buflen] != b2buf[:buflen]:
+ return False
+ b1buf = b1buf[buflen:]
+ b2buf = b2buf[buflen:]
+
+
+def are_fileobjs_equal_read_first(f1, f2):
+ '''Given two (buffered) file objects, compare their contents.
+
+ Returns a tuple (is_equal, content_of_f1), where the first element indicates
+ whether the two file objects have the same content, and the second element is
+ the content of the first file object.'''
+ blocksize = 65536
+ b1buf = b''
+ b2buf = b''
+ is_equal = True
+ content = []
+ while True:
+ if f1 and len(b1buf) < blocksize:
+ f1b = f1.read(blocksize)
+ if not f1b:
+ # f1 is EOF, so stop reading from it
+ f1 = None
+ b1buf += f1b
+ if f2 and len(b2buf) < blocksize:
+ f2b = f2.read(blocksize)
+ if not f2b:
+ # f2 is EOF, so stop reading from it
+ f2 = None
+ b2buf += f2b
+ if not b1buf or not b2buf:
+ # At least one of f1 and f2 is EOF and all its data has
+ # been processed. If both are EOF and their data has been
+ # processed, the files are equal, otherwise not.
+ is_equal = not b1buf and not b2buf
+ break
+ # Compare the next chunk of data, and remove it from the buffers
+ buflen = min(len(b1buf), len(b2buf))
+ if b1buf[:buflen] != b2buf[:buflen]:
+ is_equal = False
+ break
+ content.append(b1buf[:buflen])
+ b1buf = b1buf[buflen:]
+ b2buf = b2buf[buflen:]
+
+ content.append(b1buf)
+ if f1:
+ content.append(f1.read())
+
+ return is_equal, b''.join(content)
+
+
+def is_container_file_not_regular_file(container_stat):
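+    # The stat data returned by Docker contains a Go os.FileMode value, which
+    # keeps the file type flags in the topmost bits of a 32-bit integer;
+    # bit (32 - n) below is the n-th flag defined in Go's io/fs FileMode.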
+ for bit in (
+ # https://pkg.go.dev/io/fs#FileMode
+ 32 - 1, # ModeDir
+ 32 - 4, # ModeTemporary
+ 32 - 5, # ModeSymlink
+ 32 - 6, # ModeDevice
+ 32 - 7, # ModeNamedPipe
+ 32 - 8, # ModeSocket
+ 32 - 11, # ModeCharDevice
+ 32 - 13, # ModeIrregular
+ ):
+ if container_stat['mode'] & (1 << bit) != 0:
+ return True
+ return False
+
+
+def get_container_file_mode(container_stat):
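+    # Convert a Go os.FileMode value into a POSIX mode: keep the lower
+    # permission bits and map Go's ModeSetuid/ModeSetgid/ModeSticky flags
+    # onto the corresponding stat module bits.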
+ mode = container_stat['mode'] & 0xFFF
+ if container_stat['mode'] & (1 << (32 - 9)) != 0: # ModeSetuid
+ mode |= stat.S_ISUID # set UID bit
+ if container_stat['mode'] & (1 << (32 - 10)) != 0: # ModeSetgid
+ mode |= stat.S_ISGID # set GID bit
+ if container_stat['mode'] & (1 << (32 - 12)) != 0: # ModeSticky
+ mode |= stat.S_ISVTX # sticky bit
+ return mode
+
+
+def add_other_diff(diff, in_path, member):
+ if diff is None:
+ return
+ diff['before_header'] = in_path
+ if member.isdir():
+ diff['before'] = '(directory)'
+ elif member.issym() or member.islnk():
+ diff['before'] = member.linkname
+ elif member.ischr():
+ diff['before'] = '(character device)'
+ elif member.isblk():
+ diff['before'] = '(block device)'
+ elif member.isfifo():
+ diff['before'] = '(fifo)'
+ elif member.isdev():
+ diff['before'] = '(device)'
+ elif member.isfile():
+ raise DockerUnexpectedError('should not be a regular file')
+ else:
+ diff['before'] = '(unknown filesystem object)'
+
+
+def retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat=None, link_target=None):
+ if diff is None:
+ return
+ if regular_stat is not None:
+ # First handle all filesystem object types that are not regular files
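+        # The bit positions tested below are Go io/fs FileMode flags; see
+        # is_container_file_not_regular_file above for the mapping.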
+ if regular_stat['mode'] & (1 << (32 - 1)) != 0:
+ diff['before_header'] = container_path
+ diff['before'] = '(directory)'
+ return
+ elif regular_stat['mode'] & (1 << (32 - 4)) != 0:
+ diff['before_header'] = container_path
+ diff['before'] = '(temporary file)'
+ return
+ elif regular_stat['mode'] & (1 << (32 - 5)) != 0:
+ diff['before_header'] = container_path
+ diff['before'] = link_target
+ return
+ elif regular_stat['mode'] & (1 << (32 - 6)) != 0:
+ diff['before_header'] = container_path
+ diff['before'] = '(device)'
+ return
+ elif regular_stat['mode'] & (1 << (32 - 7)) != 0:
+ diff['before_header'] = container_path
+ diff['before'] = '(named pipe)'
+ return
+ elif regular_stat['mode'] & (1 << (32 - 8)) != 0:
+ diff['before_header'] = container_path
+ diff['before'] = '(socket)'
+ return
+ elif regular_stat['mode'] & (1 << (32 - 11)) != 0:
+ diff['before_header'] = container_path
+ diff['before'] = '(character device)'
+ return
+ elif regular_stat['mode'] & (1 << (32 - 13)) != 0:
+ diff['before_header'] = container_path
+ diff['before'] = '(unknown filesystem object)'
+ return
+ # Check whether file is too large
+ if regular_stat['size'] > max_file_size_for_diff > 0:
+ diff['dst_larger'] = max_file_size_for_diff
+ return
+
+ # We need to get hold of the content
+ def process_none(in_path):
+ diff['before'] = ''
+
+ def process_regular(in_path, tar, member):
+ add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)
+
+ def process_symlink(in_path, member):
+ diff['before_header'] = in_path
+ diff['before'] = member.linkname
+
+ def process_other(in_path, member):
+ add_other_diff(diff, in_path, member)
+
+ fetch_file_ex(
+ client,
+ container,
+ in_path=container_path,
+ process_none=process_none,
+ process_regular=process_regular,
+ process_symlink=process_symlink,
+ process_other=process_other,
+ follow_links=follow_links,
+ )
+
+
+def is_binary(content):
+ if b'\x00' in content:
+ return True
+ # TODO: better detection
+ # (ansible-core also just checks for 0x00, and even just sticks to the first 8k, so this isn't too bad...)
+ return False
+
+
+def are_fileobjs_equal_with_diff_of_first(f1, f2, size, diff, max_file_size_for_diff, container_path):
+ if diff is None:
+ return are_fileobjs_equal(f1, f2)
+ if size > max_file_size_for_diff > 0:
+ diff['dst_larger'] = max_file_size_for_diff
+ return are_fileobjs_equal(f1, f2)
+ is_equal, content = are_fileobjs_equal_read_first(f1, f2)
+ if is_binary(content):
+ diff['dst_binary'] = 1
+ else:
+ diff['before_header'] = container_path
+ diff['before'] = to_text(content)
+ return is_equal
+
+
+def add_diff_dst_from_regular_member(diff, max_file_size_for_diff, container_path, tar, member):
+ if diff is None:
+ return
+ if member.size > max_file_size_for_diff > 0:
+ diff['dst_larger'] = max_file_size_for_diff
+ return
+
+ tar_f = tar.extractfile(member) # in Python 2, this *cannot* be used in `with`...
+ content = tar_f.read()
+ if is_binary(content):
+ diff['dst_binary'] = 1
+ else:
+ diff['before_header'] = container_path
+ diff['before'] = to_text(content)
+
+
+def copy_dst_to_src(diff):
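+    # When the module keeps the existing destination (force=False), the
+    # 'before' (destination) diff fields are mirrored into the 'after'
+    # (source) slots so the reported diff shows no change.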
+ if diff is None:
+ return
+ for f, t in [
+ ('dst_size', 'src_size'),
+ ('dst_binary', 'src_binary'),
+ ('before_header', 'after_header'),
+ ('before', 'after'),
+ ]:
+ if f in diff:
+ diff[t] = diff[f]
+ elif t in diff:
+ diff.pop(t)
+
+
+def is_file_idempotent(client, container, managed_path, container_path, follow_links, local_follow_links, owner_id, group_id, mode,
+ force=False, diff=None, max_file_size_for_diff=1):
+ # Retrieve information of local file
+ try:
+ file_stat = os.stat(managed_path) if local_follow_links else os.lstat(managed_path)
+ except OSError as exc:
+ if exc.errno == 2:
+ raise DockerFileNotFound('Cannot find local file {managed_path}'.format(managed_path=managed_path))
+ raise
+ if mode is None:
+ mode = stat.S_IMODE(file_stat.st_mode)
+ if not stat.S_ISLNK(file_stat.st_mode) and not stat.S_ISREG(file_stat.st_mode):
+        raise DockerFileCopyError('Local path {managed_path} is not a symbolic link or file'.format(managed_path=managed_path))
+
+ if diff is not None:
+ if file_stat.st_size > max_file_size_for_diff > 0:
+ diff['src_larger'] = max_file_size_for_diff
+ elif stat.S_ISLNK(file_stat.st_mode):
+ diff['after_header'] = managed_path
+ diff['after'] = os.readlink(managed_path)
+ else:
+ with open(managed_path, 'rb') as f:
+ content = f.read()
+ if is_binary(content):
+ diff['src_binary'] = 1
+ else:
+ diff['after_header'] = managed_path
+ diff['after'] = to_text(content)
+
+ # When forcing and we're not following links in the container, go!
+ if force and not follow_links:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff)
+ return container_path, mode, False
+
+ # Resolve symlinks in the container (if requested), and get information on container's file
+ real_container_path, regular_stat, link_target = stat_file(
+ client,
+ container,
+ in_path=container_path,
+ follow_links=follow_links,
+ )
+
+ # Follow links in the Docker container?
+ if follow_links:
+ container_path = real_container_path
+
+ # If the file wasn't found, continue
+ if regular_stat is None:
+ if diff is not None:
+ diff['before_header'] = container_path
+ diff['before'] = ''
+ return container_path, mode, False
+
+ # When forcing, go!
+ if force:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+
+ # If force is set to False, and the destination exists, assume there's nothing to do
+ if force is False:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ copy_dst_to_src(diff)
+ return container_path, mode, True
+
+ # Basic idempotency checks
+ if stat.S_ISLNK(file_stat.st_mode):
+ if link_target is None:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+ local_link_target = os.readlink(managed_path)
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, local_link_target == link_target
+ if link_target is not None:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+ if is_container_file_not_regular_file(regular_stat):
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+ if file_stat.st_size != regular_stat['size']:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+ if mode != get_container_file_mode(regular_stat):
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+
+ # Fetch file from container
+ def process_none(in_path):
+ return container_path, mode, False
+
+ def process_regular(in_path, tar, member):
+ # Check things like user/group ID and mode
+ if any([
+ member.mode & 0xFFF != mode,
+ member.uid != owner_id,
+ member.gid != group_id,
+ not stat.S_ISREG(file_stat.st_mode),
+ member.size != file_stat.st_size,
+ ]):
+ add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)
+ return container_path, mode, False
+
+ tar_f = tar.extractfile(member) # in Python 2, this *cannot* be used in `with`...
+ with open(managed_path, 'rb') as local_f:
+ is_equal = are_fileobjs_equal_with_diff_of_first(tar_f, local_f, member.size, diff, max_file_size_for_diff, in_path)
+ return container_path, mode, is_equal
+
+ def process_symlink(in_path, member):
+ if diff is not None:
+ diff['before_header'] = in_path
+ diff['before'] = member.linkname
+
+ # Check things like user/group ID and mode
+ if member.mode & 0xFFF != mode:
+ return container_path, mode, False
+ if member.uid != owner_id:
+ return container_path, mode, False
+ if member.gid != group_id:
+ return container_path, mode, False
+
+ if not stat.S_ISLNK(file_stat.st_mode):
+ return container_path, mode, False
+
+ local_link_target = os.readlink(managed_path)
+ return container_path, mode, member.linkname == local_link_target
+
+ def process_other(in_path, member):
+ add_other_diff(diff, in_path, member)
+ return container_path, mode, False
+
+ return fetch_file_ex(
+ client,
+ container,
+ in_path=container_path,
+ process_none=process_none,
+ process_regular=process_regular,
+ process_symlink=process_symlink,
+ process_other=process_other,
+ follow_links=follow_links,
+ )
+
+
+def copy_file_into_container(client, container, managed_path, container_path, follow_links, local_follow_links,
+ owner_id, group_id, mode, force=False, diff=False, max_file_size_for_diff=1):
+ if diff:
+ diff = {}
+ else:
+ diff = None
+
+ container_path, mode, idempotent = is_file_idempotent(
+ client,
+ container,
+ managed_path,
+ container_path,
+ follow_links,
+ local_follow_links,
+ owner_id,
+ group_id,
+ mode,
+ force=force,
+ diff=diff,
+ max_file_size_for_diff=max_file_size_for_diff,
+ )
+ changed = not idempotent
+
+ if changed and not client.module.check_mode:
+ put_file(
+ client,
+ container,
+ in_path=managed_path,
+ out_path=container_path,
+ user_id=owner_id,
+ group_id=group_id,
+ mode=mode,
+ follow_links=local_follow_links,
+ )
+
+ result = dict(
+ container_path=container_path,
+ changed=changed,
+ )
+ if diff:
+ result['diff'] = diff
+ client.module.exit_json(**result)
+
+
+def is_content_idempotent(client, container, content, container_path, follow_links, owner_id, group_id, mode,
+ force=False, diff=None, max_file_size_for_diff=1):
+ if diff is not None:
+ if len(content) > max_file_size_for_diff > 0:
+ diff['src_larger'] = max_file_size_for_diff
+ elif is_binary(content):
+ diff['src_binary'] = 1
+ else:
+ diff['after_header'] = 'dynamically generated'
+ diff['after'] = to_text(content)
+
+ # When forcing and we're not following links in the container, go!
+ if force and not follow_links:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff)
+ return container_path, mode, False
+
+ # Resolve symlinks in the container (if requested), and get information on container's file
+ real_container_path, regular_stat, link_target = stat_file(
+ client,
+ container,
+ in_path=container_path,
+ follow_links=follow_links,
+ )
+
+ # Follow links in the Docker container?
+ if follow_links:
+ container_path = real_container_path
+
+ # If the file wasn't found, continue
+ if regular_stat is None:
+ if diff is not None:
+ diff['before_header'] = container_path
+ diff['before'] = ''
+ return container_path, mode, False
+
+ # When forcing, go!
+ if force:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+
+ # If force is set to False, and the destination exists, assume there's nothing to do
+ if force is False:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ copy_dst_to_src(diff)
+ return container_path, mode, True
+
+ # Basic idempotency checks
+ if link_target is not None:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+ if is_container_file_not_regular_file(regular_stat):
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+ if len(content) != regular_stat['size']:
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+ if mode != get_container_file_mode(regular_stat):
+ retrieve_diff(client, container, container_path, follow_links, diff, max_file_size_for_diff, regular_stat, link_target)
+ return container_path, mode, False
+
+ # Fetch file from container
+ def process_none(in_path):
+ if diff is not None:
+ diff['before'] = ''
+ return container_path, mode, False
+
+ def process_regular(in_path, tar, member):
+ # Check things like user/group ID and mode
+ if any([
+ member.mode & 0xFFF != mode,
+ member.uid != owner_id,
+ member.gid != group_id,
+ member.size != len(content),
+ ]):
+ add_diff_dst_from_regular_member(diff, max_file_size_for_diff, in_path, tar, member)
+ return container_path, mode, False
+
+ tar_f = tar.extractfile(member) # in Python 2, this *cannot* be used in `with`...
+ is_equal = are_fileobjs_equal_with_diff_of_first(tar_f, io.BytesIO(content), member.size, diff, max_file_size_for_diff, in_path)
+ return container_path, mode, is_equal
+
+ def process_symlink(in_path, member):
+ if diff is not None:
+ diff['before_header'] = in_path
+ diff['before'] = member.linkname
+
+ return container_path, mode, False
+
+ def process_other(in_path, member):
+ add_other_diff(diff, in_path, member)
+ return container_path, mode, False
+
+ return fetch_file_ex(
+ client,
+ container,
+ in_path=container_path,
+ process_none=process_none,
+ process_regular=process_regular,
+ process_symlink=process_symlink,
+ process_other=process_other,
+ follow_links=follow_links,
+ )
+
+
+def copy_content_into_container(client, container, content, container_path, follow_links,
+ owner_id, group_id, mode, force=False, diff=False, max_file_size_for_diff=1):
+ if diff:
+ diff = {}
+ else:
+ diff = None
+
+ container_path, mode, idempotent = is_content_idempotent(
+ client,
+ container,
+ content,
+ container_path,
+ follow_links,
+ owner_id,
+ group_id,
+ mode,
+ force=force,
+ diff=diff,
+ max_file_size_for_diff=max_file_size_for_diff,
+ )
+ changed = not idempotent
+
+ if changed and not client.module.check_mode:
+ put_file_content(
+ client,
+ container,
+ content=content,
+ out_path=container_path,
+ user_id=owner_id,
+ group_id=group_id,
+ mode=mode,
+ )
+
+ result = dict(
+ container_path=container_path,
+ changed=changed,
+ )
+ if diff:
+ # Since the content is no_log, make sure that the before/after strings look sufficiently different
+ key = generate_insecure_key()
+ diff['scrambled_diff'] = base64.b64encode(key)
+ for k in ('before', 'after'):
+ if k in diff:
+ diff[k] = scramble(diff[k], key)
+ result['diff'] = diff
+ client.module.exit_json(**result)
+
+
+def main():
+ argument_spec = dict(
+ container=dict(type='str', required=True),
+ path=dict(type='path'),
+ container_path=dict(type='str', required=True),
+ follow=dict(type='bool', default=False),
+ local_follow=dict(type='bool', default=True),
+ owner_id=dict(type='int'),
+ group_id=dict(type='int'),
+ mode=dict(type='int'),
+ force=dict(type='bool'),
+ content=dict(type='str', no_log=True),
+ content_is_b64=dict(type='bool', default=False),
+
+ # Undocumented parameters for use by the action plugin
+ _max_file_size_for_diff=dict(type='int'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ min_docker_api_version='1.20',
+ supports_check_mode=True,
+ mutually_exclusive=[('path', 'content')],
+ required_together=[('owner_id', 'group_id')],
+ required_by={
+ 'content': ['mode'],
+ },
+ )
+
+ container = client.module.params['container']
+ managed_path = client.module.params['path']
+ container_path = client.module.params['container_path']
+ follow = client.module.params['follow']
+ local_follow = client.module.params['local_follow']
+ owner_id = client.module.params['owner_id']
+ group_id = client.module.params['group_id']
+ mode = client.module.params['mode']
+ force = client.module.params['force']
+ content = client.module.params['content']
+ max_file_size_for_diff = client.module.params['_max_file_size_for_diff'] or 1
+
+ if content is not None:
+ if client.module.params['content_is_b64']:
+ try:
+ content = base64.b64decode(content)
+ except Exception as e: # depending on Python version and error, multiple different exceptions can be raised
+ client.fail('Cannot Base64 decode the content option: {0}'.format(e))
+ else:
+ content = to_bytes(content)
+
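+    # Normalize the destination path inside the container: make it absolute
+    # and collapse any '.' or '..' components before talking to the API.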
+ if not container_path.startswith(os.path.sep):
+ container_path = os.path.join(os.path.sep, container_path)
+ container_path = os.path.normpath(container_path)
+
+ try:
+ if owner_id is None or group_id is None:
+ owner_id, group_id = determine_user_group(client, container)
+
+ if content is not None:
+ copy_content_into_container(
+ client,
+ container,
+ content,
+ container_path,
+ follow_links=follow,
+ owner_id=owner_id,
+ group_id=group_id,
+ mode=mode,
+ force=force,
+ diff=client.module._diff,
+ max_file_size_for_diff=max_file_size_for_diff,
+ )
+ elif managed_path is not None:
+ copy_file_into_container(
+ client,
+ container,
+ managed_path,
+ container_path,
+ follow_links=follow,
+ local_follow_links=local_follow,
+ owner_id=owner_id,
+ group_id=group_id,
+ mode=mode,
+ force=force,
+ diff=client.module._diff,
+ max_file_size_for_diff=max_file_size_for_diff,
+ )
+ else:
+ # Can happen if a user explicitly passes `content: null` or `path: null`...
+ client.fail('One of path and content must be supplied')
+ except NotFound as exc:
+ client.fail('Could not find container "{1}" or resource in it ({0})'.format(exc, container))
+ except APIError as exc:
+ client.fail('An unexpected Docker error occurred for container "{1}": {0}'.format(exc, container), exception=traceback.format_exc())
+ except DockerException as exc:
+ client.fail('An unexpected Docker error occurred for container "{1}": {0}'.format(exc, container), exception=traceback.format_exc())
+ except RequestException as exc:
+ client.fail(
+ 'An unexpected requests error occurred for container "{1}" when trying to talk to the Docker daemon: {0}'.format(exc, container),
+ exception=traceback.format_exc())
+ except DockerUnexpectedError as exc:
+ client.fail('Unexpected error: {exc}'.format(exc=to_native(exc)), exception=traceback.format_exc())
+ except DockerFileCopyError as exc:
+ client.fail(to_native(exc))
+ except OSError as exc:
+ client.fail('Unexpected error: {exc}'.format(exc=to_native(exc)), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_container_exec.py b/ansible_collections/community/docker/plugins/modules/docker_container_exec.py
new file mode 100644
index 000000000..522a70a33
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_container_exec.py
@@ -0,0 +1,307 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2021, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_exec
+
+short_description: Execute a command in a docker container
+
+version_added: 1.5.0
+
+description:
+ - Executes a command in a Docker container.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ container:
+ type: str
+ required: true
+ description:
+ - The name of the container to execute the command in.
+ argv:
+ type: list
+ elements: str
+ description:
+ - The command to execute.
+ - Since this is a list of arguments, no quoting is needed.
+ - Exactly one of I(argv) or I(command) must be specified.
+ command:
+ type: str
+ description:
+ - The command to execute.
+ - Exactly one of I(argv) or I(command) must be specified.
+ chdir:
+ type: str
+ description:
+ - The directory to run the command in.
+ detach:
+ description:
+ - Whether to run the command synchronously (I(detach=false), default) or asynchronously (I(detach=true)).
+ - If set to C(true), I(stdin) cannot be provided, and the return values C(stdout), C(stderr) and
+ C(rc) are not returned.
+ type: bool
+ default: false
+ version_added: 2.1.0
+ user:
+ type: str
+ description:
+ - If specified, the user to execute this command with.
+ stdin:
+ type: str
+ description:
+ - Set the stdin of the command directly to the specified value.
+ - Can only be used if I(detach=false).
+ stdin_add_newline:
+ type: bool
+ default: true
+ description:
+ - If set to C(true), appends a newline to I(stdin).
+ strip_empty_ends:
+ type: bool
+ default: true
+ description:
+ - Strip empty lines from the end of stdout/stderr in result.
+ tty:
+ type: bool
+ default: false
+ description:
+ - Whether to allocate a TTY.
+ env:
+ description:
+      - Dictionary of environment variables with their respective values to be passed to the command run inside the container.
+ - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example C("true")) in order to avoid data loss.
+      - Please note that if you are passing values in with Jinja2 templates, like C("{{ value }}"), you need to add C(| string) to prevent Ansible from
+        converting strings such as C("true") back to booleans. The correct way is to use C("{{ value | string }}").
+ type: dict
+ version_added: 2.1.0
+
+notes:
+ - Does not support C(check_mode).
+ - Does B(not work with TCP TLS sockets) when using I(stdin). This is caused by the inability to send C(close_notify) without closing the connection
+ with Python's C(SSLSocket)s. See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Run a simple command (command)
+ community.docker.docker_container_exec:
+ container: foo
+ command: /bin/bash -c "ls -lah"
+ chdir: /root
+ register: result
+
+- name: Print stdout
+ ansible.builtin.debug:
+ var: result.stdout
+
+- name: Run a simple command (argv)
+ community.docker.docker_container_exec:
+ container: foo
+ argv:
+ - /bin/bash
+ - "-c"
+ - "ls -lah > /dev/stderr"
+ chdir: /root
+ register: result
+
+- name: Print stderr lines
+ ansible.builtin.debug:
+ var: result.stderr_lines
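+
+# The next two tasks are illustrative sketches; the container name 'foo' is
+# hypothetical. The first shows the env option with quoted values, as the
+# option documentation above requires; the second shows detach=true, which
+# returns an exec_id instead of stdout/stderr/rc.
+- name: Run a command with environment variables
+  community.docker.docker_container_exec:
+    container: foo
+    command: /bin/sh -c 'echo "$DEBUG $PORT"'
+    env:
+      DEBUG: "true"
+      PORT: "8080"
+  register: result
+
+- name: Start a long-running command without waiting for it
+  community.docker.docker_container_exec:
+    container: foo
+    command: sleep 600
+    detach: true
+  register: detached_result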
+'''
+
+RETURN = '''
+stdout:
+ type: str
+ returned: success and I(detach=false)
+ description:
+ - The standard output of the container command.
+stderr:
+ type: str
+ returned: success and I(detach=false)
+ description:
+ - The standard error output of the container command.
+rc:
+ type: int
+ returned: success and I(detach=false)
+ sample: 0
+ description:
+ - The exit code of the command.
+exec_id:
+ type: str
+ returned: success and I(detach=true)
+ sample: 249d9e3075655baf705ed8f40488c5e9434049cf3431976f1bfdb73741c574c5
+ description:
+ - The execution ID of the command.
+ version_added: 2.1.0
+'''
+
+import shlex
+import traceback
+
+from ansible.module_utils.common.text.converters import to_text, to_bytes, to_native
+from ansible.module_utils.compat import selectors
+from ansible.module_utils.six import string_types
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.socket_handler import (
+ DockerSocketHandlerModule,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import format_environment
+
+
+def main():
+ argument_spec = dict(
+ container=dict(type='str', required=True),
+ argv=dict(type='list', elements='str'),
+ command=dict(type='str'),
+ chdir=dict(type='str'),
+ detach=dict(type='bool', default=False),
+ user=dict(type='str'),
+ stdin=dict(type='str'),
+ stdin_add_newline=dict(type='bool', default=True),
+ strip_empty_ends=dict(type='bool', default=True),
+ tty=dict(type='bool', default=False),
+ env=dict(type='dict'),
+ )
+
+ option_minimal_versions = dict(
+ chdir=dict(docker_api_version='1.35'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ option_minimal_versions=option_minimal_versions,
+ mutually_exclusive=[('argv', 'command')],
+ required_one_of=[('argv', 'command')],
+ )
+
+ container = client.module.params['container']
+ argv = client.module.params['argv']
+ command = client.module.params['command']
+ chdir = client.module.params['chdir']
+ detach = client.module.params['detach']
+ user = client.module.params['user']
+ stdin = client.module.params['stdin']
+ strip_empty_ends = client.module.params['strip_empty_ends']
+ tty = client.module.params['tty']
+ env = client.module.params['env']
+
+ if env is not None:
+ for name, value in list(env.items()):
+ if not isinstance(value, string_types):
+ client.module.fail_json(
+ msg="Non-string value found for env option. Ambiguous env options must be "
+ "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
+ env[name] = to_text(value, errors='surrogate_or_strict')
+
+ if command is not None:
+ argv = shlex.split(command)
+
+ if detach and stdin is not None:
+ client.module.fail_json(msg='If detach=true, stdin cannot be provided.')
+
+ if stdin is not None and client.module.params['stdin_add_newline']:
+ stdin += '\n'
+
+ try:
+ data = {
+ 'Container': container,
+ 'User': user or '',
+ 'Privileged': False,
+ 'Tty': False,
+ 'AttachStdin': bool(stdin),
+ 'AttachStdout': True,
+ 'AttachStderr': True,
+ 'Cmd': argv,
+ 'Env': format_environment(env) if env is not None else None,
+ }
+ if chdir is not None:
+ data['WorkingDir'] = chdir
+
+ exec_data = client.post_json_to_json('/containers/{0}/exec', container, data=data)
+ exec_id = exec_data['Id']
+
+ data = {
+ 'Tty': tty,
+ 'Detach': detach,
+ }
+ if detach:
+ client.post_json_to_text('/exec/{0}/start', exec_id, data=data)
+ client.module.exit_json(changed=True, exec_id=exec_id)
+
+ else:
+ if stdin and not detach:
+ exec_socket = client.post_json_to_stream_socket('/exec/{0}/start', exec_id, data=data)
+ try:
+ with DockerSocketHandlerModule(exec_socket, client.module, selectors) as exec_socket_handler:
+ if stdin:
+ exec_socket_handler.write(to_bytes(stdin))
+
+ stdout, stderr = exec_socket_handler.consume()
+ finally:
+ exec_socket.close()
+ else:
+ stdout, stderr = client.post_json_to_stream('/exec/{0}/start', exec_id, data=data, stream=False, tty=tty, demux=True)
+
+ result = client.get_json('/exec/{0}/json', exec_id)
+
+ stdout = to_text(stdout or b'')
+ stderr = to_text(stderr or b'')
+ if strip_empty_ends:
+ stdout = stdout.rstrip('\r\n')
+ stderr = stderr.rstrip('\r\n')
+
+ client.module.exit_json(
+ changed=True,
+ stdout=stdout,
+ stderr=stderr,
+ rc=result.get('ExitCode') or 0,
+ )
+ except NotFound:
+ client.fail('Could not find container "{0}"'.format(container))
+ except APIError as e:
+ if e.response is not None and e.response.status_code == 409:
+ client.fail('The container "{0}" has been paused ({1})'.format(container, to_native(e)))
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_container_info.py b/ansible_collections/community/docker/plugins/modules/docker_container_info.py
new file mode 100644
index 000000000..bfc28156b
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_container_info.py
@@ -0,0 +1,144 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_info
+
+short_description: Retrieves facts about a docker container
+
+description:
+ - Retrieves facts about a docker container.
+ - Essentially returns the output of C(docker inspect <name>), similar to what M(community.docker.docker_container)
+ returns for a non-absent container.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - The name of the container to inspect.
+      - When identifying an existing container, the value may be a container name or a long or short container ID.
+ type: str
+ required: true
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get info on container
+ community.docker.docker_container_info:
+ name: mydata
+ register: result
+
+- name: Does container exist?
+ ansible.builtin.debug:
+ msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about container
+ ansible.builtin.debug:
+ var: result.container
+ when: result.exists
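+
+# Illustrative sketch: per the name option above, a long or short container ID
+# can be used instead of a name. The ID below is hypothetical.
+- name: Get info on container by short ID
+  community.docker.docker_container_info:
+    name: 8e47bf643eb9
+  register: result_by_id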
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the container exists.
+ type: bool
+ returned: always
+ sample: true
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+    - Will be C(none) if the container does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ container = client.get_container(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+            exists=bool(container),
+ container=container,
+ )
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_host_info.py b/ansible_collections/community/docker/plugins/modules/docker_host_info.py
new file mode 100644
index 000000000..f08845faa
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_host_info.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_host_info
+
+short_description: Retrieves facts about the docker host and lists objects such as containers, images, networks and volumes
+
+description:
+ - Retrieves facts about a docker host.
+ - Essentially returns the output of C(docker system info).
+  - The module also allows listing object names for containers, images, networks and volumes,
+    as well as querying information on disk usage.
+  - The output differs depending on the API version of the docker daemon.
+ - If the docker daemon cannot be contacted or does not meet the API version requirements,
+ the module will fail.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ details:
+ - This action does not modify state.
+ diff_mode:
+ support: N/A
+ details:
+ - This action does not modify state.
+
+options:
+ containers:
+ description:
+ - Whether to list containers.
+ type: bool
+ default: false
+ containers_all:
+ description:
+ - By default, only running containers are returned.
+ - This corresponds to the C(--all) option to C(docker container list).
+ type: bool
+ default: false
+ version_added: 3.4.0
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to list.
+ - "For example, C(until: 24h)."
+ - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
+        C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to list images.
+ type: bool
+ default: false
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to list.
+ - "For example, C(dangling: true)."
+ - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
+        C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to list networks.
+ type: bool
+ default: false
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to list.
+ - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
+        C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to list volumes.
+ type: bool
+ default: false
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to list.
+ - C(label) is a special case of filter which can be a string C(<key>) matching when a label is present, a string
+        C(<key>=<value>) matching when a label has a particular value, or a list of strings C(<key>)/C(<key>=<value>).
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ disk_usage:
+ description:
+ - Summary information on used disk space by all Docker layers.
+ - The output is a sum of images, volumes, containers and build cache.
+ type: bool
+ default: false
+ verbose_output:
+ description:
+      - When set to C(true) and one of I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(true),
+        the output will contain verbose information about the objects, matching the full output of the corresponding API method.
+        For details see the documentation of your version of the Docker API at U(https://docs.docker.com/engine/api/).
+      - The verbose output in this module contains only a subset of the information returned by the corresponding I(_info) module
+        for each type of object.
+ type: bool
+ default: false
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get info on docker host
+ community.docker.docker_host_info:
+ register: result
+
+- name: Get info on docker host and list images
+ community.docker.docker_host_info:
+ images: true
+ register: result
+
+- name: Get info on docker host and list images matching the filter
+ community.docker.docker_host_info:
+ images: true
+ images_filters:
+ label: "mylabel"
+ register: result
+
+- name: Get info on docker host and verbose list images
+ community.docker.docker_host_info:
+ images: true
+ verbose_output: true
+ register: result
+
+- name: Get info on docker host and used disk space
+ community.docker.docker_host_info:
+ disk_usage: true
+ register: result
+
+- name: Get info on docker host and list containers matching the filter
+ community.docker.docker_host_info:
+ containers: true
+ containers_filters:
+ label:
+ - key1=value1
+ - key2=value2
+ register: result
+
+- name: Show host information
+ ansible.builtin.debug:
+ var: result.host_info
+
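+# Illustrative sketch: the containers_all option documented above includes
+# stopped containers in the listing.
+- name: Get info on docker host and list all containers, including stopped ones
+  community.docker.docker_host_info:
+    containers: true
+    containers_all: true
+  register: result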
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+
+host_info:
+ description:
+ - Facts representing the basic state of the docker host. Matches the C(docker system info) output.
+ returned: always
+ type: dict
+volumes:
+ description:
+ - List of dict objects containing the basic information about each volume.
+      Keys match the C(docker volume ls) output unless I(verbose_output=true).
+ See description for I(verbose_output).
+ returned: When I(volumes) is C(true)
+ type: list
+ elements: dict
+networks:
+ description:
+ - List of dict objects containing the basic information about each network.
+      Keys match the C(docker network ls) output unless I(verbose_output=true).
+ See description for I(verbose_output).
+ returned: When I(networks) is C(true)
+ type: list
+ elements: dict
+containers:
+ description:
+ - List of dict objects containing the basic information about each container.
+      Keys match the C(docker container ls) output unless I(verbose_output=true).
+ See description for I(verbose_output).
+ returned: When I(containers) is C(true)
+ type: list
+ elements: dict
+images:
+ description:
+ - List of dict objects containing the basic information about each image.
+      Keys match the C(docker image ls) output unless I(verbose_output=true).
+ See description for I(verbose_output).
+ returned: When I(images) is C(true)
+ type: list
+ elements: dict
+disk_usage:
+ description:
+    - Summary information on disk usage by images, containers and volumes on the docker host
+ unless I(verbose_output=true). See description for I(verbose_output).
+ returned: When I(disk_usage) is C(true)
+ type: dict
+
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ clean_dict_booleans_for_docker_api,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, APIError
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import convert_filters
+
+
+class DockerHostManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerHostManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['volumes', 'networks', 'containers', 'images']
+
+ self.results['host_info'] = self.get_docker_host_info()
+
+ if self.client.module.params['disk_usage']:
+ self.results['disk_usage'] = self.get_docker_disk_usage_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name), True)
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+
+ def get_docker_host_info(self):
+ try:
+ return self.client.info()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_disk_usage_facts(self):
+ try:
+ if self.verbose_output:
+ return self.client.df()
+ else:
+ return dict(LayersSize=self.client.df()['LayersSize'])
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
+ items = None
+ items_list = []
+
+ header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
+ header_volumes = ['Driver', 'Name']
+ header_images = ['Id', 'RepoTags', 'Created', 'Size']
+ header_networks = ['Id', 'Driver', 'Name', 'Scope']
+
+ filter_arg = dict()
+ if filters:
+ filter_arg['filters'] = filters
+ try:
+ if docker_object == 'containers':
+ params = {
+ 'limit': -1,
+ 'all': 1 if self.client.module.params['containers_all'] else 0,
+ 'size': 0,
+ 'trunc_cmd': 0,
+ 'filters': convert_filters(filters) if filters else None,
+ }
+ items = self.client.get_json("/containers/json", params=params)
+ elif docker_object == 'networks':
+ params = {
+ 'filters': convert_filters(filters or {})
+ }
+ items = self.client.get_json("/networks", params=params)
+ elif docker_object == 'images':
+ params = {
+ 'only_ids': 0,
+ 'all': 0,
+ 'filters': convert_filters(filters) if filters else None,
+ }
+ items = self.client.get_json("/images/json", params=params)
+ elif docker_object == 'volumes':
+ params = {
+ 'filters': convert_filters(filters) if filters else None,
+ }
+ items = self.client.get_json('/volumes', params=params)
+ items = items['Volumes']
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host for object '%s': %s" % (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ return items
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'containers':
+ for key in header_containers:
+ item_record[key] = item.get(key)
+ elif docker_object == 'networks':
+ for key in header_networks:
+ item_record[key] = item.get(key)
+ elif docker_object == 'images':
+ for key in header_images:
+ item_record[key] = item.get(key)
+ elif docker_object == 'volumes':
+ for key in header_volumes:
+ item_record[key] = item.get(key)
+ items_list.append(item_record)
+
+ return items_list
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_all=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ disk_usage=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ ),
+ )
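+    # Creating the client succeeded, so the daemon is reachable; record that so
+    # that even later failures still report can_talk_to_docker=true.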
+ client.fail_results['can_talk_to_docker'] = True
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerHostManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_image.py b/ansible_collections/community/docker/plugins/modules/docker_image.py
new file mode 100644
index 000000000..735de786a
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_image.py
@@ -0,0 +1,1033 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image
+
+short_description: Manage docker images
+
+description:
+ - Build, load or pull an image, making the image available for creating containers. Also supports tagging
+ an image, pushing an image, and archiving an image to a C(.tar) file.
+
+notes:
+ - Building images is done using Docker daemon's API. It is not possible to use BuildKit / buildx this way.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: partial
+ details:
+ - When trying to pull an image, the module assumes this is always changed in check mode.
+ diff_mode:
+ support: none
+
+options:
+ source:
+ description:
+ - "Determines where the module will try to retrieve the image from."
+ - "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
+ be specified when this value is used."
+ - "Use C(load) to load the image from a C(.tar) file. I(load_path) must
+ be specified when this value is used."
+ - "Use C(pull) to pull the image from a registry."
+ - "Use C(local) to make sure that the image is already available on the local
+ docker daemon. This means that the module does not try to build, pull or load the image."
+ type: str
+ choices:
+ - build
+ - load
+ - pull
+ - local
+ build:
+ description:
+ - "Specifies options used for building images."
+ type: dict
+ suboptions:
+ cache_from:
+ description:
+ - List of image names to consider as cache source.
+ type: list
+ elements: str
+ dockerfile:
+ description:
+ - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+ - This can also include a relative path (relative to I(path)).
+ type: str
+ http_timeout:
+ description:
+ - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+ seconds.
+ type: int
+ path:
+ description:
+          - Use with state C(present) to build an image. Will be the path to a directory containing the context and
+ Dockerfile for building an image.
+ type: path
+ required: true
+ pull:
+ description:
+          - When building an image, download any updates to the C(FROM) image in the Dockerfile.
+ type: bool
+ default: false
+ rm:
+ description:
+ - Remove intermediate containers after build.
+ type: bool
+ default: true
+ network:
+ description:
+ - The network to use for C(RUN) build instructions.
+ type: str
+ nocache:
+ description:
+ - Do not use cache when building an image.
+ type: bool
+ default: false
+ etc_hosts:
+ description:
+ - Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
+ type: dict
+ args:
+ description:
+ - Provide a dictionary of C(key:value) build arguments that map to Dockerfile ARG directive.
+          - Docker expects the value to be a string. For convenience, any non-string values will be converted to strings.
+ type: dict
+ container_limits:
+ description:
+ - A dictionary of limits applied to each container created by the build process.
+ type: dict
+ suboptions:
+ memory:
+ description:
+ - Set memory limit for build.
+ type: int
+ memswap:
+ description:
+ - Total memory (memory + swap).
+ - Use C(-1) to disable swap.
+ type: int
+ cpushares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ cpusetcpus:
+ description:
+ - CPUs in which to allow execution.
+ - For example, C(0-3) or C(0,1).
+ type: str
+ use_config_proxy:
+ description:
+ - If set to C(true) and a proxy configuration is specified in the docker client configuration
+ (by default C($HOME/.docker/config.json)), the corresponding environment variables will
+ be set in the container being built.
+ type: bool
+ target:
+ description:
+          - When building an image, specifies an intermediate build stage by
+            name as the final stage for the resulting image.
+ type: str
+ platform:
+ description:
+ - Platform in the format C(os[/arch[/variant]]).
+ type: str
+ version_added: 1.1.0
+ archive_path:
+ description:
+      - Use with state C(present) to archive an image to a C(.tar) file.
+ type: path
+ load_path:
+ description:
+      - Use with state C(present) to load an image from a C(.tar) file.
+ - Set I(source) to C(load) if you want to load the image.
+ type: path
+ force_source:
+ description:
+ - Use with state C(present) to build, load or pull an image (depending on the
+ value of the I(source) option) when the image already exists.
+ type: bool
+ default: false
+ force_absent:
+ description:
+      - Use with state C(absent) to un-tag and remove all images matching the specified name.
+ type: bool
+ default: false
+ force_tag:
+ description:
+ - Use with state C(present) to force tagging an image.
+ type: bool
+ default: false
+ name:
+ description:
+ - "Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name).
+      When pushing or pulling an image, the name can optionally include the tag by appending C(:tag_name)."
+ - Note that image IDs (hashes) are only supported for I(state=absent), for I(state=present) with I(source=load),
+ and for I(state=present) with I(source=local).
+ type: str
+ required: true
+ pull:
+ description:
+ - "Specifies options used for pulling images."
+ type: dict
+ version_added: 1.3.0
+ suboptions:
+ platform:
+ description:
+ - When pulling an image, ask for this specific platform.
+ - Note that this value is not used to determine whether the image needs to be pulled. This might change
+ in the future in a minor release, though.
+ type: str
+ push:
+ description:
+ - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
+ type: bool
+ default: false
+ repository:
+ description:
+ - Use with state C(present) to tag the image.
+ - Expects format C(repository:tag). If no tag is provided, will use the value of the I(tag) parameter or C(latest).
+ - If I(push=true), I(repository) must either include a registry, or will be assumed to belong to the default
+ registry (Docker Hub).
+ type: str
+ state:
+ description:
+ - Make assertions about the state of an image.
+      - When C(absent), an image will be removed. Use the I(force_absent) option to un-tag and remove all images
+        matching the provided name.
+      - When C(present), check if an image exists using the provided name and tag. If the image is not found or the
+        I(force_source) option is used, the image will either be pulled, built or loaded, depending on the I(source) option.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ tag:
+ description:
+      - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
+        C(latest).
+      - If the I(name) parameter format is C(name:tag), then the tag value from I(name) will take precedence.
+ type: str
+ default: latest
+
+requirements:
+ - "Docker API >= 1.25"
+
+author:
+ - Pavel Antonov (@softzilla)
+ - Chris Houseknecht (@chouseknecht)
+ - Sorin Sbarnea (@ssbarnea)
+
+'''
+
+EXAMPLES = '''
+
+- name: Pull an image
+ community.docker.docker_image:
+ name: pacur/centos-7
+ source: pull
+ # Select platform for pulling. If not specified, will pull whatever docker prefers.
+ pull:
+ platform: amd64
+
+- name: Tag and push to docker hub
+ community.docker.docker_image:
+ name: pacur/centos-7:56
+ repository: dcoppenhagan/myimage:7.56
+ push: true
+ source: local
+
+- name: Tag and push to local registry
+ community.docker.docker_image:
+ # Image will be centos:7
+ name: centos
+ # Will be pushed to localhost:5000/centos:7
+ repository: localhost:5000/centos
+ tag: 7
+ push: true
+ source: local
+
+- name: Add tag latest to image
+ community.docker.docker_image:
+ name: myimage:7.1.2
+ repository: myimage:latest
+ # As 'latest' usually already is present, we need to enable overwriting of existing tags:
+ force_tag: true
+ source: local
+
+- name: Remove image
+ community.docker.docker_image:
+ state: absent
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+
+- name: Build an image and push it to a private repo
+ community.docker.docker_image:
+ build:
+ path: ./sinatra
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ push: true
+ source: build
+
+- name: Archive image
+ community.docker.docker_image:
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ archive_path: my_sinatra.tar
+ source: local
+
+- name: Load image from archive and push to a private registry
+ community.docker.docker_image:
+ name: localhost:5000/myimages/sinatra
+ tag: v1
+ push: true
+ load_path: my_sinatra.tar
+ source: load
+
+- name: Build an image with build args
+ community.docker.docker_image:
+ name: myimage
+ build:
+ path: /path/to/build/dir
+ args:
+ log_volume: /var/log/myapp
+ listen_port: 8080
+ source: build
+
+- name: Build image using cache source
+ community.docker.docker_image:
+ name: myimage:latest
+ build:
+ path: /path/to/build/dir
+ # Use as cache source for building myimage
+ cache_from:
+ - nginx:latest
+ - alpine:3.8
+ source: build
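+
+# Illustrative sketch: the build.etc_hosts option (documented above) adds
+# extra /etc/hosts entries to the build containers. The hostname and IP
+# address below are hypothetical.
+- name: Build an image with an extra /etc/hosts entry
+  community.docker.docker_image:
+    name: myimage:latest
+    build:
+      path: /path/to/build/dir
+      etc_hosts:
+        mirror.internal: 10.0.0.5
+    source: build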
+'''
+
+RETURN = '''
+image:
+ description: Image inspection results for the affected image.
+ returned: success
+ type: dict
+ sample: {}
+stdout:
+ description: Docker build output when building an image.
+ returned: success
+ type: str
+ sample: ""
+ version_added: 1.0.0
+'''
+
+import errno
+import json
+import os
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.image_archive import (
+ archived_image_manifest,
+ api_image_id,
+ ImageArchiveInvalidException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ clean_dict_booleans_for_docker_api,
+ DockerBaseClass,
+ is_image_name_id,
+ is_valid_tag,
+)
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils._api.auth import (
+ get_config_header,
+ resolve_repository_name,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.constants import (
+ DEFAULT_DATA_CHUNK_SIZE,
+ CONTAINER_LIMITS_KEYS,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, NotFound
+from ansible_collections.community.docker.plugins.module_utils._api.utils.build import (
+ process_dockerfile,
+ tar,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
+ format_extra_hosts,
+ parse_repository_tag,
+)
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+ '''
+ Configure a docker_image task.
+
+ :param client: Ansible Docker Client wrapper over Docker client
+ :type client: AnsibleDockerClient
+ :param results: This task adds its output values to this dictionary
+ :type results: dict
+ '''
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.source = parameters['source']
+ build = parameters['build'] or dict()
+ pull = parameters['pull'] or dict()
+ self.archive_path = parameters['archive_path']
+ self.cache_from = build.get('cache_from')
+ self.container_limits = build.get('container_limits')
+ self.dockerfile = build.get('dockerfile')
+ self.force_source = parameters['force_source']
+ self.force_absent = parameters['force_absent']
+ self.force_tag = parameters['force_tag']
+ self.load_path = parameters['load_path']
+ self.name = parameters['name']
+ self.network = build.get('network')
+ self.extra_hosts = clean_dict_booleans_for_docker_api(build.get('etc_hosts'))
+ self.nocache = build.get('nocache', False)
+ self.build_path = build.get('path')
+ self.pull = build.get('pull')
+ self.target = build.get('target')
+ self.repository = parameters['repository']
+ self.rm = build.get('rm', True)
+ self.state = parameters['state']
+ self.tag = parameters['tag']
+ self.http_timeout = build.get('http_timeout')
+ self.pull_platform = pull.get('platform')
+ self.push = parameters['push']
+ self.buildargs = build.get('args')
+ self.build_platform = build.get('platform')
+ self.use_config_proxy = build.get('use_config_proxy')
+
+ # If name contains a tag, it takes precedence over tag parameter.
+ if not is_image_name_id(self.name):
+ repo, repo_tag = parse_repository_tag(self.name)
+ if repo_tag:
+ self.name = repo
+ self.tag = repo_tag
+
+ # Sanity check: fail early when we know that something will fail later
+ if self.repository and is_image_name_id(self.repository):
+ self.fail("`repository` must not be an image ID; got: %s" % self.repository)
+ if not self.repository and self.push and is_image_name_id(self.name):
+ self.fail("Cannot push an image by ID; specify `repository` to tag and push the image with ID %s instead" % self.name)
+
+ if self.state == 'present':
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def present(self):
+ '''
+ Handles state = 'present', which includes building, loading or pulling an image,
+ depending on user provided parameters.
+
+ :returns None
+ '''
+ if is_image_name_id(self.name):
+ image = self.client.find_image_by_id(self.name, accept_missing_image=True)
+ else:
+ image = self.client.find_image(name=self.name, tag=self.tag)
+
+ if not image or self.force_source:
+ if self.source == 'build':
+ if is_image_name_id(self.name):
+ self.fail("Image name must not be an image ID for source=build; got: %s" % self.name)
+
+ # Build the image
+ if not os.path.isdir(self.build_path):
+ self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
+ image_name = self.name
+ if self.tag:
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.log("Building image %s" % image_name)
+ self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results.update(self.build_image())
+
+ elif self.source == 'load':
+ # Load the image from an archive
+ if not os.path.isfile(self.load_path):
+ self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
+ self.load_path))
+ image_name = self.name
+ if self.tag and not is_image_name_id(image_name):
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'] = self.load_image()
+ elif self.source == 'pull':
+ if is_image_name_id(self.name):
+ self.fail("Image name must not be an image ID for source=pull; got: %s" % self.name)
+
+ # pull the image
+ self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag, platform=self.pull_platform)
+ elif self.source == 'local':
+ if image is None:
+ name = self.name
+ if self.tag and not is_image_name_id(name):
+ name = "%s:%s" % (self.name, self.tag)
+ self.client.fail('Cannot find the image %s locally.' % name)
+ if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+ else:
+ self.results['image'] = image
+
+ if self.archive_path:
+ self.archive_image(self.name, self.tag)
+
+ if self.push and not self.repository:
+ self.push_image(self.name, self.tag)
+ elif self.repository:
+ self.tag_image(self.name, self.tag, self.repository, push=self.push)
+
+ def absent(self):
+ '''
+ Handles state = 'absent', which removes an image.
+
+ :return None
+ '''
+ name = self.name
+ if is_image_name_id(name):
+ image = self.client.find_image_by_id(name, accept_missing_image=True)
+ else:
+ image = self.client.find_image(name, self.tag)
+ if self.tag:
+ name = "%s:%s" % (self.name, self.tag)
+ if image:
+ if not self.check_mode:
+ try:
+ self.client.delete_json('/images/{0}', name, params={'force': self.force_absent})
+ except NotFound:
+ # If the image vanished while we were trying to remove it, don't fail
+ pass
+ except Exception as exc:
+ self.fail("Error removing image %s - %s" % (name, to_native(exc)))
+
+ self.results['changed'] = True
+ self.results['actions'].append("Removed image %s" % (name))
+ self.results['image']['state'] = 'Deleted'
+
+ @staticmethod
+ def archived_image_action(failure_logger, archive_path, current_image_name, current_image_id):
+ '''
+ If the archive is missing or requires replacement, return an action message.
+
+ :param failure_logger: a logging function that accepts one parameter of type str
+ :type failure_logger: Callable
+ :param archive_path: Filename to write archive to
+ :type archive_path: str
+ :param current_image_name: repo:tag
+ :type current_image_name: str
+ :param current_image_id: Hash, including hash type prefix such as "sha256:"
+ :type current_image_id: str
+
+ :returns: Either None, or an Ansible action message.
+ :rtype: str
+ '''
+
+ def build_msg(reason):
+ return 'Archived image %s to %s, %s' % (current_image_name, archive_path, reason)
+
+ try:
+ archived = archived_image_manifest(archive_path)
+ except ImageArchiveInvalidException as exc:
+ failure_logger('Unable to extract manifest summary from archive: %s' % to_native(exc))
+ return build_msg('overwriting an unreadable archive file')
+
+ if archived is None:
+ return build_msg('since none present')
+ elif current_image_id == api_image_id(archived.image_id) and [current_image_name] == archived.repo_tags:
+ return None
+ else:
+ name = ', '.join(archived.repo_tags)
+
+ return build_msg('overwriting archive with image %s named %s' % (archived.image_id, name))
+
+ def archive_image(self, name, tag):
+ '''
+ Archive an image to a .tar file. Called when archive_path is passed.
+
+ :param name: Name/repository of the image
+ :type name: str
+ :param tag: Optional image tag; assumed to be "latest" if None
+ :type tag: str | None
+ '''
+
+ if not tag:
+ tag = "latest"
+
+ if is_image_name_id(name):
+ image = self.client.find_image_by_id(name, accept_missing_image=True)
+ image_name = name
+ else:
+ image = self.client.find_image(name=name, tag=tag)
+ image_name = "%s:%s" % (name, tag)
+
+ if not image:
+ self.log("archive image: image %s not found" % image_name)
+ return
+
+ # Will have a 'sha256:' prefix
+ image_id = image['Id']
+
+ action = self.archived_image_action(self.client.module.debug, self.archive_path, image_name, image_id)
+
+ if action:
+ self.results['actions'].append(action)
+
+ self.results['changed'] = action is not None
+
+ if (not self.check_mode) and self.results['changed']:
+ self.log("Getting archive of image %s" % image_name)
+ try:
+ saved_image = self.client._stream_raw_result(
+ self.client._get(self.client._url('/images/{0}/get', image_name), stream=True),
+ DEFAULT_DATA_CHUNK_SIZE,
+ False,
+ )
+ except Exception as exc:
+ self.fail("Error getting image %s - %s" % (image_name, to_native(exc)))
+
+ try:
+ with open(self.archive_path, 'wb') as fd:
+ for chunk in saved_image:
+ fd.write(chunk)
+ except Exception as exc:
+ self.fail("Error writing image archive %s - %s" % (self.archive_path, to_native(exc)))
+
+ self.results['image'] = image
+
+ def push_image(self, name, tag=None):
+ '''
+ If the name of the image contains a repository path, then push the image.
+
+ :param name Name of the image to push.
+ :param tag Use a specific tag.
+ :return: None
+ '''
+
+ if is_image_name_id(name):
+ self.fail("Cannot push an image ID: %s" % name)
+
+ repository = name
+ if not tag:
+ repository, tag = parse_repository_tag(name)
+ registry, repo_name = resolve_repository_name(repository)
+
+ self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+
+ if registry:
+ self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ status = None
+ try:
+ changed = False
+
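+                    # Resolve the registry part of the push target so that the
+                    # matching credentials can be attached via X-Registry-Auth.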
+ push_repository, push_tag = repository, tag
+ if not push_tag:
+ push_repository, push_tag = parse_repository_tag(push_repository)
+ push_registry, dummy = resolve_repository_name(push_repository)
+ headers = {}
+ header = get_config_header(self.client, push_registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ response = self.client._post_json(
+ self.client._url("/images/{0}/push", push_repository),
+ data=None,
+ headers=headers,
+ stream=True,
+ params={'tag': push_tag},
+ )
+ self.client._raise_for_status(response)
+ for line in self.client._stream_helper(response, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('errorDetail'):
+ raise Exception(line['errorDetail']['message'])
+ status = line.get('status')
+ if status == 'Pushing':
+ changed = True
+ self.results['changed'] = changed
+ except Exception as exc:
+ if 'unauthorized' in str(exc):
+ if 'authentication required' in str(exc):
+ self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
+ (registry, repo_name, tag, to_native(exc), registry))
+ else:
+ self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
+ (registry, repo_name, tag, str(exc)))
+ self.fail("Error pushing image %s: %s" % (repository, to_native(exc)))
+ self.results['image'] = self.client.find_image(name=repository, tag=tag)
+ if not self.results['image']:
+ self.results['image'] = dict()
+ self.results['image']['push_status'] = status
+
+ def tag_image(self, name, tag, repository, push=False):
+ '''
+ Tag an image into a repository.
+
+ :param name: name of the image. required.
+ :param tag: image tag.
+ :param repository: path to the repository. required.
+ :param push: bool. push the image once it's tagged.
+ :return: None
+ '''
+ repo, repo_tag = parse_repository_tag(repository)
+ if not repo_tag:
+ repo_tag = "latest"
+ if tag:
+ repo_tag = tag
+ image = self.client.find_image(name=repo, tag=repo_tag)
+ found = 'found' if image else 'not found'
+ self.log("image %s was %s" % (repo, found))
+
+ if not image or self.force_tag:
+ image_name = name
+ if not is_image_name_id(name) and tag and not name.endswith(':' + tag):
+ image_name = "%s:%s" % (name, tag)
+ self.log("tagging %s to %s:%s" % (image_name, repo, repo_tag))
+ self.results['changed'] = True
+ self.results['actions'].append("Tagged image %s to %s:%s" % (image_name, repo, repo_tag))
+ if not self.check_mode:
+ try:
+                    # Finding the image does not always work, especially when running against a localhost registry.
+                    # In those cases, if we don't set force=True, the tag call errors out.
+ params = {
+ 'tag': repo_tag,
+ 'repo': repo,
+ 'force': True,
+ }
+ res = self.client._post(self.client._url('/images/{0}/tag', image_name), params=params)
+ self.client._raise_for_status(res)
+ if res.status_code != 201:
+ raise Exception("Tag operation failed.")
+ except Exception as exc:
+ self.fail("Error: failed to tag image - %s" % to_native(exc))
+ self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
+ if image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+
+ if push:
+ self.push_image(repo, repo_tag)
+
+ @staticmethod
+ def _extract_output_line(line, output):
+ '''
+        Extract a text line from stream output and, if found, add it to output.
+ '''
+ if 'stream' in line or 'status' in line:
+            # Make sure we have a string (assuming that line['stream'] and
+            # line['status'] are either not defined, falsy, or a string)
+ text_line = line.get('stream') or line.get('status') or ''
+ output.extend(text_line.splitlines())
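+        # Typical stream lines handled above (illustrative):
+        #   {'stream': 'Step 1/5 : FROM alpine\n'} -> appends 'Step 1/5 : FROM alpine'
+        #   {'status': 'Pushing'}                  -> appends 'Pushing'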
+
+ def build_image(self):
+ '''
+ Build an image
+
+ :return: image dict
+ '''
+ remote = context = None
+ headers = {}
+ buildargs = {}
+ if self.buildargs:
+ for key, value in self.buildargs.items():
+ buildargs[key] = to_native(value)
+
+ container_limits = self.container_limits or {}
+ for key in container_limits.keys():
+ if key not in CONTAINER_LIMITS_KEYS:
+ raise DockerException('Invalid container_limits key {key}'.format(key=key))
+
+ dockerfile = self.dockerfile
+ if self.build_path.startswith(('http://', 'https://', 'git://', 'github.com/', 'git@')):
+ remote = self.build_path
+ elif not os.path.isdir(self.build_path):
+ raise TypeError("You must specify a directory to build in path")
+ else:
+ dockerignore = os.path.join(self.build_path, '.dockerignore')
+ exclude = None
+ if os.path.exists(dockerignore):
+ with open(dockerignore) as f:
+ exclude = list(filter(
+ lambda x: x != '' and x[0] != '#',
+ [line.strip() for line in f.read().splitlines()]
+ ))
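+                # Sketch with hypothetical .dockerignore contents:
+                #   # build artifacts
+                #   *.pyc
+                #
+                #   .git
+                # yields exclude == ['*.pyc', '.git']; comment and blank lines are dropped.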
+ dockerfile = process_dockerfile(dockerfile, self.build_path)
+ context = tar(self.build_path, exclude=exclude, dockerfile=dockerfile, gzip=False)
+
+ params = {
+ 't': "%s:%s" % (self.name, self.tag) if self.tag else self.name,
+ 'remote': remote,
+ 'q': False,
+ 'nocache': self.nocache,
+ 'rm': self.rm,
+ 'forcerm': self.rm,
+ 'pull': self.pull,
+ 'dockerfile': dockerfile,
+ }
+ params.update(container_limits)
+
+ if self.use_config_proxy:
+ proxy_args = self.client._proxy_configs.get_environment()
+ for k, v in proxy_args.items():
+ buildargs.setdefault(k, v)
+ if buildargs:
+ params.update({'buildargs': json.dumps(buildargs)})
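+        # Build args travel JSON-encoded in the query string; for example
+        # (hypothetical values) {'HTTP_PROXY': 'http://proxy:3128'} becomes
+        # params['buildargs'] == '{"HTTP_PROXY": "http://proxy:3128"}'.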
+
+ if self.cache_from:
+ params.update({'cachefrom': json.dumps(self.cache_from)})
+
+ if self.target:
+ params.update({'target': self.target})
+
+ if self.network:
+ params.update({'networkmode': self.network})
+
+ if self.extra_hosts is not None:
+ params.update({'extrahosts': format_extra_hosts(self.extra_hosts)})
+
+ if self.build_platform is not None:
+ params['platform'] = self.build_platform
+
+ if context is not None:
+ headers['Content-Type'] = 'application/tar'
+
+ self.client._set_auth_headers(headers)
+
+ response = self.client._post(
+ self.client._url('/build'),
+ data=context,
+ params=params,
+ headers=headers,
+ stream=True,
+ timeout=self.http_timeout,
+ )
+
+ if context is not None:
+ context.close()
+
+ build_output = []
+ for line in self.client._stream_helper(response, decode=True):
+ self.log(line, pretty_print=True)
+ self._extract_output_line(line, build_output)
+
+ if line.get('error'):
+ if line.get('errorDetail'):
+ errorDetail = line.get('errorDetail')
+ self.fail(
+ "Error building %s - code: %s, message: %s, logs: %s" % (
+ self.name,
+ errorDetail.get('code'),
+ errorDetail.get('message'),
+ build_output))
+ else:
+ self.fail("Error building %s - message: %s, logs: %s" % (
+ self.name, line.get('error'), build_output))
+
+ return {
+ "stdout": "\n".join(build_output),
+ "image": self.client.find_image(name=self.name, tag=self.tag),
+ }
+
+ def load_image(self):
+ '''
+ Load an image from a .tar archive
+
+ :return: image dict
+ '''
+ # Load image(s) from file
+ load_output = []
+ has_output = False
+ try:
+ self.log("Opening image %s" % self.load_path)
+ with open(self.load_path, 'rb') as image_tar:
+ self.log("Loading image from %s" % self.load_path)
+ res = self.client._post(self.client._url("/images/load"), data=image_tar, stream=True)
+ if LooseVersion(self.client.api_version) >= LooseVersion('1.23'):
+ has_output = True
+ for line in self.client._stream_helper(res, decode=True):
+ self.log(line, pretty_print=True)
+ self._extract_output_line(line, load_output)
+ else:
+ self.client._raise_for_status(res)
+ self.client.module.warn(
+ 'The API version of your Docker daemon is < 1.23, which does not return the image'
+ ' loading result from the Docker daemon. Therefore, we cannot verify whether the'
+                        ' expected image was loaded, whether multiple images were loaded, or whether the load'
+ ' actually succeeded. You should consider upgrading your Docker daemon.'
+ )
+ except EnvironmentError as exc:
+ if exc.errno == errno.ENOENT:
+ self.client.fail("Error opening image %s - %s" % (self.load_path, to_native(exc)))
+ self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output))
+ except Exception as exc:
+ self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output))
+
+ # Collect loaded images
+ if has_output:
+ # We can only do this when we actually got some output from Docker daemon
+ loaded_images = set()
+ loaded_image_ids = set()
+ for line in load_output:
+ if line.startswith('Loaded image:'):
+ loaded_images.add(line[len('Loaded image:'):].strip())
+ if line.startswith('Loaded image ID:'):
+ loaded_image_ids.add(line[len('Loaded image ID:'):].strip().lower())
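+            # Typical daemon output lines parsed above (illustrative):
+            #   'Loaded image: hello-world:latest'  -> loaded_images
+            #   'Loaded image ID: sha256:e004c2...' -> loaded_image_ids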
+
+ if not loaded_images and not loaded_image_ids:
+ self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
+
+            if is_image_name_id(self.name):
+                expected_image = self.name.lower()
+                image_missing = expected_image not in loaded_image_ids
+            else:
+                expected_image = '%s:%s' % (self.name, self.tag)
+                image_missing = expected_image not in loaded_images
+            if image_missing:
+                self.client.fail(
+                    "The archive did not contain image '%s'. Instead, found %s." % (
+                        expected_image,
+                        ', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids)))),
+                    stdout='\n'.join(load_output))
+            # Use discard() on both sets: when self.name is an image ID, the expected
+            # image is tracked in loaded_image_ids, so remove() on loaded_images would
+            # raise a KeyError.
+            loaded_images.discard(expected_image)
+            loaded_image_ids.discard(expected_image)
+
+ if loaded_images:
+ self.client.module.warn(
+ "The archive contained more images than specified: %s" % (
+ ', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids))), ))
+
+ if is_image_name_id(self.name):
+ return self.client.find_image_by_id(self.name, accept_missing_image=True)
+ else:
+ return self.client.find_image(self.name, self.tag)
+
+
+def main():
+ argument_spec = dict(
+ source=dict(type='str', choices=['build', 'load', 'pull', 'local']),
+ build=dict(type='dict', options=dict(
+ cache_from=dict(type='list', elements='str'),
+ container_limits=dict(type='dict', options=dict(
+ memory=dict(type='int'),
+ memswap=dict(type='int'),
+ cpushares=dict(type='int'),
+ cpusetcpus=dict(type='str'),
+ )),
+ dockerfile=dict(type='str'),
+ http_timeout=dict(type='int'),
+ network=dict(type='str'),
+ nocache=dict(type='bool', default=False),
+ path=dict(type='path', required=True),
+ pull=dict(type='bool', default=False),
+ rm=dict(type='bool', default=True),
+ args=dict(type='dict'),
+ use_config_proxy=dict(type='bool'),
+ target=dict(type='str'),
+ etc_hosts=dict(type='dict'),
+ platform=dict(type='str'),
+ )),
+ archive_path=dict(type='path'),
+ force_source=dict(type='bool', default=False),
+ force_absent=dict(type='bool', default=False),
+ force_tag=dict(type='bool', default=False),
+ load_path=dict(type='path'),
+ name=dict(type='str', required=True),
+ pull=dict(type='dict', options=dict(
+ platform=dict(type='str'),
+ )),
+ push=dict(type='bool', default=False),
+ repository=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ tag=dict(type='str', default='latest'),
+ )
+
+ required_if = [
+ ('state', 'present', ['source']),
+ ('source', 'build', ['build']),
+ ('source', 'load', ['load_path']),
+ ]
+
+ def detect_etc_hosts(client):
+ return client.module.params['build'] and bool(client.module.params['build'].get('etc_hosts'))
+
+ def detect_build_platform(client):
+ return client.module.params['build'] and client.module.params['build'].get('platform') is not None
+
+ def detect_pull_platform(client):
+ return client.module.params['pull'] and client.module.params['pull'].get('platform') is not None
+
+ option_minimal_versions = dict()
+ option_minimal_versions["build.etc_hosts"] = dict(docker_api_version='1.27', detect_usage=detect_etc_hosts)
+ option_minimal_versions["build.platform"] = dict(docker_api_version='1.32', detect_usage=detect_build_platform)
+ option_minimal_versions["pull.platform"] = dict(docker_api_version='1.32', detect_usage=detect_pull_platform)
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ if not is_valid_tag(client.module.params['tag'], allow_empty=True):
+ client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag']))
+
+ if client.module.params['source'] == 'build':
+ if not client.module.params['build'] or not client.module.params['build'].get('path'):
+ client.fail('If "source" is set to "build", the "build.path" option must be specified.')
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ image={}
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_image_info.py b/ansible_collections/community/docker/plugins/modules/docker_image_info.py
new file mode 100644
index 000000000..e4f480b1c
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_image_info.py
@@ -0,0 +1,273 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_info
+
+short_description: Inspect docker images
+
+description:
+ - Provide one or more image names, and the module will inspect each, returning an array of inspection results.
+ - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
+ locally, you can call the module with the image name, then check whether the result list is empty (image does not
+ exist) or has one element (the image exists locally).
+ - The module will not attempt to pull images from registries. Use M(community.docker.docker_image) with I(source) set to C(pull)
+ to ensure an image is pulled.
+
+notes:
+ - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
+ where C(tag) is optional. If a tag is not provided, C(latest) will be used. Instead of image names, also
+ image IDs can be used.
+ - If no name is provided, a list of all images will be returned.
+ type: list
+ elements: str
+
+requirements:
+ - "Docker API >= 1.25"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+
+'''
+
+EXAMPLES = '''
+- name: Inspect a single image
+ community.docker.docker_image_info:
+ name: pacur/centos-7
+
+- name: Inspect multiple images
+ community.docker.docker_image_info:
+ name:
+ - pacur/centos-7
+ - sinatra
+ register: result
+
+- name: Make sure that both images pacur/centos-7 and sinatra exist locally
+ ansible.builtin.assert:
+ that:
+ - result.images | length == 2
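+
+# Illustration of the existence check described in the module description;
+# the image name is arbitrary.
+- name: Check whether the image hello-world:latest exists locally
+  community.docker.docker_image_info:
+    name: hello-world:latest
+  register: image_info
+
+- name: Fail if the image is not present locally
+  ansible.builtin.assert:
+    that:
+      - image_info.images | length == 1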
+'''
+
+RETURN = '''
+images:
+ description:
+ - Inspection results for the selected images.
+ - The list only contains inspection results of images existing locally.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "Architecture": "amd64",
+ "Author": "",
+ "Comment": "",
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/etc/docker/registry/config.yml"
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
+ "ContainerConfig": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ '#(nop) CMD ["/etc/docker/registry/config.yml"]'
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Created": "2016-03-08T21:08:15.399680378Z",
+ "DockerVersion": "1.9.1",
+ "GraphDriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
+ "Name": "registry:2",
+ "Os": "linux",
+ "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
+ "RepoDigests": [],
+ "RepoTags": [
+ "registry:2"
+ ],
+ "Size": 0,
+ "VirtualSize": 165808884
+ }
+ ]
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ is_image_name_id,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, NotFound
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import parse_repository_tag
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.name = self.client.module.params.get('name')
+ self.log("Gathering facts for images: %s" % (str(self.name)))
+
+ if self.name:
+ self.results['images'] = self.get_facts()
+ else:
+ self.results['images'] = self.get_all_images()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def get_facts(self):
+ '''
+ Lookup and inspect each image name found in the names parameter.
+
+        :returns: array of image dictionaries
+ '''
+
+ results = []
+
+ names = self.name
+ if not isinstance(names, list):
+ names = [names]
+
+ for name in names:
+ if is_image_name_id(name):
+ self.log('Fetching image %s (ID)' % (name))
+ image = self.client.find_image_by_id(name, accept_missing_image=True)
+ else:
+ repository, tag = parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ self.log('Fetching image %s:%s' % (repository, tag))
+ image = self.client.find_image(name=repository, tag=tag)
+ if image:
+ results.append(image)
+ return results
+
+ def get_all_images(self):
+ results = []
+ params = {
+ 'only_ids': 0,
+ 'all': 0,
+ }
+ images = self.client.get_json("/images/json", params=params)
+ for image in images:
+ try:
+ inspection = self.client.get_json('/images/{0}/json', image['Id'])
+ except NotFound:
+ inspection = None
+ except Exception as exc:
+ self.fail("Error inspecting image %s - %s" % (image['Id'], to_native(exc)))
+ results.append(inspection)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ images=[]
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_image_load.py b/ansible_collections/community/docker/plugins/modules/docker_image_load.py
new file mode 100644
index 000000000..880ae4e4c
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_image_load.py
@@ -0,0 +1,194 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_load
+
+short_description: Load docker image(s) from archives
+
+version_added: 1.3.0
+
+description:
+ - Load one or multiple Docker images from a C(.tar) archive, and return information on
+ the loaded image(s).
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ path:
+ description:
+ - The path to the C(.tar) archive to load Docker image(s) from.
+ type: path
+ required: true
+
+notes:
+ - Does not support C(check_mode).
+
+requirements:
+ - "Docker API >= 1.25"
+
+author:
+ - Felix Fontein (@felixfontein)
+'''
+
+EXAMPLES = '''
+- name: Load all image(s) from the given tar file
+ community.docker.docker_image_load:
+ path: /path/to/images.tar
+ register: result
+
+- name: Print the loaded image names
+ ansible.builtin.debug:
+ msg: "Loaded the following images: {{ result.image_names | join(', ') }}"
+'''
+
+RETURN = '''
+image_names:
+ description: List of image names and IDs loaded from the archive.
+ returned: success
+ type: list
+ elements: str
+ sample:
+ - 'hello-world:latest'
+ - 'sha256:e004c2cc521c95383aebb1fb5893719aa7a8eae2e7a71f316a4410784edb00a9'
+images:
+ description: Image inspection results for the loaded images.
+ returned: success
+ type: list
+ elements: dict
+ sample: []
+'''
+
+import errno
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ is_image_name_id,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+
+
+class ImageManager(DockerBaseClass):
+ def __init__(self, client, results):
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.path = parameters['path']
+
+ self.load_images()
+
+ @staticmethod
+ def _extract_output_line(line, output):
+ '''
+        Extract a text line from stream output and, if found, add it to output.
+ '''
+ if 'stream' in line or 'status' in line:
+            # Make sure we have a string (assuming that line['stream'] and
+            # line['status'] are either not defined, falsy, or a string)
+ text_line = line.get('stream') or line.get('status') or ''
+ output.extend(text_line.splitlines())
+
+ def load_images(self):
+ '''
+ Load images from a .tar archive
+ '''
+ # Load image(s) from file
+ load_output = []
+ try:
+ self.log("Opening image {0}".format(self.path))
+ with open(self.path, 'rb') as image_tar:
+ self.log("Loading images from {0}".format(self.path))
+ res = self.client._post(self.client._url("/images/load"), data=image_tar, stream=True)
+ for line in self.client._stream_helper(res, decode=True):
+ self.log(line, pretty_print=True)
+ self._extract_output_line(line, load_output)
+ except EnvironmentError as exc:
+ if exc.errno == errno.ENOENT:
+ self.client.fail("Error opening archive {0} - {1}".format(self.path, to_native(exc)))
+ self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output))
+ except Exception as exc:
+ self.client.fail("Error loading archive {0} - {1}".format(self.path, to_native(exc)), stdout='\n'.join(load_output))
+
+ # Collect loaded images
+ loaded_images = []
+ for line in load_output:
+ if line.startswith('Loaded image:'):
+ loaded_images.append(line[len('Loaded image:'):].strip())
+ if line.startswith('Loaded image ID:'):
+ loaded_images.append(line[len('Loaded image ID:'):].strip())
+
+ if not loaded_images:
+ self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
+
+ images = []
+ for image_name in loaded_images:
+ if is_image_name_id(image_name):
+ images.append(self.client.find_image_by_id(image_name))
+ elif ':' in image_name:
+ image_name, tag = image_name.rsplit(':', 1)
+ images.append(self.client.find_image(image_name, tag))
+ else:
+                self.client.module.warn('Image name "{0}" is neither an image ID nor has a tag'.format(image_name))
+
+ self.results['image_names'] = loaded_images
+ self.results['images'] = images
+ self.results['changed'] = True
+ self.results['stdout'] = '\n'.join(load_output)
+
+
+def main():
+ client = AnsibleDockerClient(
+ argument_spec=dict(
+ path=dict(type='path', required=True),
+ ),
+ supports_check_mode=False,
+ )
+
+ try:
+ results = dict(
+ image_names=[],
+ images=[],
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_login.py b/ansible_collections/community/docker/plugins/modules/docker_login.py
new file mode 100644
index 000000000..360dd5785
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_login.py
@@ -0,0 +1,451 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
+# Chris Houseknecht, <house@redhat.com>
+# James Tanner, <jtanner@redhat.com>
+#
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_login
+short_description: Log into a Docker registry
+description:
+ - Provides functionality similar to the C(docker login) command.
+  - Authenticate with a Docker registry and add the credentials to your local Docker config file or to the
+    credential store associated with the registry. Adding the credentials to the config file or the credential
+    store allows future connections to the registry using tools such as Ansible's Docker modules, the Docker CLI
+    and the Docker SDK for Python without needing to provide credentials.
+ - Running in check mode will perform the authentication without updating the config file.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ registry_url:
+ description:
+ - The registry URL.
+ type: str
+ default: "https://index.docker.io/v1/"
+ aliases:
+ - registry
+ - url
+ username:
+ description:
+ - The username for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ password:
+ description:
+ - The plaintext password for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ reauthorize:
+ description:
+ - Refresh existing authentication found in the configuration file.
+ type: bool
+ default: false
+ aliases:
+ - reauth
+ config_path:
+ description:
+ - Custom path to the Docker CLI configuration file.
+ type: path
+ default: ~/.docker/config.json
+ aliases:
+ - dockercfg_path
+ state:
+ description:
+      - This controls the current state of the user. C(present) will log a user in, C(absent) will log them out.
+      - To log out, you only need the registry server, which defaults to DockerHub.
+      - Before Ansible 2.1 you could only log in.
+ - Docker does not support 'logout' with a custom config file.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+
+requirements:
+ - "Docker API >= 1.25"
+author:
+ - Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Log into DockerHub
+ community.docker.docker_login:
+ username: docker
+ password: rekcod
+
+- name: Log into private registry and force re-authorization
+ community.docker.docker_login:
+ registry_url: your.private.registry.io
+ username: yourself
+ password: secrets3
+ reauthorize: true
+
+- name: Log into DockerHub using a custom config file
+ community.docker.docker_login:
+ username: docker
+ password: rekcod
+ config_path: /tmp/.mydockercfg
+
+- name: Log out of DockerHub
+ community.docker.docker_login:
+ state: absent
+'''
+
+RETURN = '''
+login_results:
+ description: Results from the login.
+ returned: when I(state=present)
+ type: dict
+ sample: {
+ "serveraddress": "localhost:5000",
+ "username": "testuser"
+ }
+'''
+
+import base64
+import json
+import os
+import traceback
+
+from ansible.module_utils.common.text.converters import to_bytes, to_text, to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DEFAULT_DOCKER_REGISTRY,
+ DockerBaseClass,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api import auth
+from ansible_collections.community.docker.plugins.module_utils._api.auth import decode_auth
+from ansible_collections.community.docker.plugins.module_utils._api.credentials.errors import CredentialsNotFound
+from ansible_collections.community.docker.plugins.module_utils._api.credentials.store import Store
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+
+
+class DockerFileStore(object):
+ '''
+    A custom credential store class that implements only the functionality we need to
+    update the Docker config file when no credential helper is provided.
+ '''
+
+ program = "<legacy config>"
+
+ def __init__(self, config_path):
+ self._config_path = config_path
+
+ # Make sure we have a minimal config if none is available.
+ self._config = dict(
+ auths=dict()
+ )
+
+ try:
+ # Attempt to read the existing config.
+ with open(self._config_path, "r") as f:
+ config = json.load(f)
+ except (ValueError, IOError):
+ # No config found or an invalid config found so we'll ignore it.
+ config = dict()
+
+        # Update our internal config with whatever was loaded.
+ self._config.update(config)
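+        # The resulting structure mirrors ~/.docker/config.json, for example
+        # (illustrative values):
+        #   {'auths': {'https://index.docker.io/v1/': {'auth': '<base64 of user:pass>'}}}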
+
+ @property
+ def config_path(self):
+ '''
+ Return the config path configured in this DockerFileStore instance.
+ '''
+
+ return self._config_path
+
+ def get(self, server):
+ '''
+ Retrieve credentials for `server` if there are any in the config file.
+        Otherwise raise a `CredentialsNotFound` error.
+ '''
+
+ server_creds = self._config['auths'].get(server)
+ if not server_creds:
+ raise CredentialsNotFound('No matching credentials')
+
+ (username, password) = decode_auth(server_creds['auth'])
+
+ return dict(
+ Username=username,
+ Secret=password
+ )
+
+ def _write(self):
+ '''
+ Write config back out to disk.
+ '''
+        # Make sure the directory exists
+        config_dir = os.path.dirname(self._config_path)
+        if not os.path.exists(config_dir):
+            os.makedirs(config_dir)
+        # Write config; make sure it has permissions 0o600
+ content = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
+ f = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+ try:
+ os.write(f, content)
+ finally:
+ os.close(f)
+
+ def store(self, server, username, password):
+ '''
+        Add credentials for `server` to the current configuration.
+ '''
+
+ b64auth = base64.b64encode(
+ to_bytes(username) + b':' + to_bytes(password)
+ )
+ auth = to_text(b64auth)
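+        # For example (hypothetical credentials): username 'alice' with password
+        # 's3cret' yields auth == base64(b'alice:s3cret') == 'YWxpY2U6czNjcmV0'.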
+
+ # build up the auth structure
+ if 'auths' not in self._config:
+ self._config['auths'] = dict()
+
+ self._config['auths'][server] = dict(
+ auth=auth
+ )
+
+ self._write()
+
+ def erase(self, server):
+ '''
+ Remove credentials for the given server from the configuration.
+ '''
+
+ if 'auths' in self._config and server in self._config['auths']:
+ self._config['auths'].pop(server)
+ self._write()
+
+
+class LoginManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(LoginManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.registry_url = parameters.get('registry_url')
+ self.username = parameters.get('username')
+ self.password = parameters.get('password')
+ self.reauthorize = parameters.get('reauthorize')
+ self.config_path = parameters.get('config_path')
+ self.state = parameters.get('state')
+
+ def run(self):
+ '''
+        Do the actual work of this task here. This allows instantiation for partial
+ testing.
+ '''
+
+ if self.state == 'present':
+ self.login()
+ else:
+ self.logout()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def _login(self, reauth):
+ if self.config_path and os.path.exists(self.config_path):
+ self.client._auth_configs = auth.load_config(
+ self.config_path, credstore_env=self.client.credstore_env
+ )
+ elif not self.client._auth_configs or self.client._auth_configs.is_empty:
+ self.client._auth_configs = auth.load_config(
+ credstore_env=self.client.credstore_env
+ )
+
+ authcfg = self.client._auth_configs.resolve_authconfig(self.registry_url)
+ # If we found an existing auth config for this registry and username
+ # combination, we can return it immediately unless reauth is requested.
+ if authcfg and authcfg.get('username', None) == self.username \
+ and not reauth:
+ return authcfg
+
+ req_data = {
+ 'username': self.username,
+ 'password': self.password,
+ 'email': None,
+ 'serveraddress': self.registry_url,
+ }
+
+ response = self.client._post_json(self.client._url('/auth'), data=req_data)
+ if response.status_code == 200:
+ self.client._auth_configs.add_auth(self.registry_url or auth.INDEX_NAME, req_data)
+ return self.client._result(response, json=True)
+
+ def login(self):
+ '''
+ Log into the registry with provided username/password. On success update the config
+ file with the new authorization.
+
+ :return: None
+ '''
+
+ self.results['actions'].append("Logged into %s" % (self.registry_url))
+ self.log("Log into %s with username %s" % (self.registry_url, self.username))
+ try:
+ response = self._login(self.reauthorize)
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc)))
+
+ # If user is already logged in, then response contains password for user
+ if 'password' in response:
+            # The daemon returns the stored password when the user is already logged in,
+            # even if a wrong password was given. So if the returned password differs from
+            # the one we passed in, and the user did not request re-authorization, re-authorize anyway.
+ if not self.reauthorize and response['password'] != self.password:
+ try:
+ response = self._login(True)
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, to_native(exc)))
+ response.pop('password', None)
+ self.results['login_result'] = response
+
+ self.update_credentials()
+
+ def logout(self):
+ '''
+ Log out of the registry. On success update the config file.
+
+ :return: None
+ '''
+
+ # Get the configuration store.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
+ self.results['changed'] = False
+ return
+
+ if not self.check_mode:
+ store.erase(self.registry_url)
+ self.results['changed'] = True
+
+ def update_credentials(self):
+ '''
+        If the authorization is not stored, attempt to store the authorization values via
+ the appropriate credential helper or to the config file.
+
+ :return: None
+ '''
+
+ # Check to see if credentials already exist.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ current = store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ current = dict(
+ Username='',
+ Secret=''
+ )
+
+ if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
+ if not self.check_mode:
+ store.store(self.registry_url, self.username, self.password)
+ self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
+ self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
+ store.program, self.registry_url))
+ self.results['changed'] = True
+
+ def get_credential_store_instance(self, registry, dockercfg_path):
+ '''
+ Return an instance of docker.credentials.Store used by the given registry.
+
+        :return: A Store instance when a credential helper is configured, otherwise a DockerFileStore
+        :rtype: Union[docker.credentials.Store, DockerFileStore]
+ '''
+
+ credstore_env = self.client.credstore_env
+
+ config = auth.load_config(config_path=dockercfg_path)
+
+ store_name = auth.get_credential_store(config, registry)
+
+ # Make sure that there is a credential helper before trying to instantiate a
+ # Store object.
+ if store_name:
+ self.log("Found credential store %s" % store_name)
+ return Store(store_name, environment=credstore_env)
+
+ return DockerFileStore(dockercfg_path)
+
+
+def main():
+
+ argument_spec = dict(
+ registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ reauthorize=dict(type='bool', default=False, aliases=['reauth']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
+ )
+
+ required_if = [
+ ('state', 'present', ['username', 'password']),
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ login_result={}
+ )
+
+ manager = LoginManager(client, results)
+ manager.run()
+
+ if 'actions' in results:
+ del results['actions']
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_network.py b/ansible_collections/community/docker/plugins/modules/docker_network.py
new file mode 100644
index 000000000..db9323636
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_network.py
@@ -0,0 +1,679 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_network
+short_description: Manage Docker networks
+description:
+ - Create/remove Docker networks and connect containers to them.
+ - Performs largely the same function as the C(docker network) CLI subcommand.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ name:
+ description:
+ - Name of the network to operate on.
+ type: str
+ required: true
+ aliases:
+ - network_name
+
+ connected:
+ description:
+ - List of container names or container IDs to connect to a network.
+ - Please note that the module only makes sure that these containers are connected to the network,
+ but does not care about connection options. If you rely on specific IP addresses etc., use the
+ M(community.docker.docker_container) module to ensure your containers are correctly connected to this network.
+ type: list
+ elements: str
+ default: []
+ aliases:
+ - containers
+
+ driver:
+ description:
+      - Specify the type of network. Docker provides bridge and overlay drivers, but third-party drivers can also be used.
+ type: str
+ default: bridge
+
+ driver_options:
+ description:
+ - Dictionary of network settings. Consult docker docs for valid options and values.
+ type: dict
+ default: {}
+
+ force:
+ description:
+      - With state C(absent), forces disconnecting all containers from the
+        network prior to deleting the network. With state C(present), disconnects
+        all containers, deletes the network and re-creates the
+        network.
+ - This option is required if you have changed the IPAM or driver options
+ and want an existing network to be updated to use the new options.
+ type: bool
+ default: false
+
+ appends:
+ description:
+ - By default the connected list is canonical, meaning containers not on the list are removed from the network.
+ - Use I(appends) to leave existing containers connected.
+ type: bool
+ default: false
+ aliases:
+ - incremental
+
+ enable_ipv6:
+ description:
+ - Enable IPv6 networking.
+ type: bool
+
+ ipam_driver:
+ description:
+ - Specify an IPAM driver.
+ type: str
+
+ ipam_driver_options:
+ description:
+ - Dictionary of IPAM driver options.
+ type: dict
+
+ ipam_config:
+ description:
+ - List of IPAM config blocks. Consult
+ L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
+ Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
+ type: list
+ elements: dict
+ suboptions:
+ subnet:
+ description:
+ - IP subset in CIDR notation.
+ type: str
+ iprange:
+ description:
+ - IP address range in CIDR notation.
+ type: str
+ gateway:
+ description:
+ - IP gateway address.
+ type: str
+ aux_addresses:
+ description:
+ - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
+ type: dict
+
+ state:
+ description:
+ - C(absent) deletes the network. If a network has connected containers, it
+ cannot be deleted. Use the I(force) option to disconnect all containers
+ and delete the network.
+ - C(present) creates the network, if it does not already exist with the
+ specified parameters, and connects the list of containers provided via
+ the connected parameter. Containers not on the list will be disconnected.
+ An empty list will leave no containers connected to the network. Use the
+ I(appends) option to leave existing containers connected. Use the I(force)
+        option to force re-creation of the network.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+ internal:
+ description:
+ - Restrict external access to the network.
+ type: bool
+
+ labels:
+ description:
+ - Dictionary of labels.
+ type: dict
+ default: {}
+
+ scope:
+ description:
+ - Specify the network's scope.
+ type: str
+ choices:
+ - local
+ - global
+ - swarm
+
+ attachable:
+ description:
+ - If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
+ type: bool
+
+notes:
+ - When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network.
+    It does not try to reconnect containers, except the ones listed in I(connected), and even for these, it does not consider specific
+    connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
+    network, use the M(community.docker.docker_container) module to loop over your containers and make sure they are connected properly.
+ - The module does not support Docker Swarm. This means that it will not try to disconnect or reconnect services. If services are connected to the
+ network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
+ fail as well.
+
+author:
+ - "Ben Keith (@keitwb)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Create a network
+ community.docker.docker_network:
+ name: network_one
+
+- name: Remove all but selected list of containers
+ community.docker.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ - container_b
+ - container_c
+
+- name: Remove a single container
+ community.docker.docker_network:
+ name: network_one
+ connected: "{{ fulllist|difference(['container_a']) }}"
+
+- name: Add a container to a network, leaving existing containers connected
+ community.docker.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ appends: true
+
+- name: Create a network with driver options
+ community.docker.docker_network:
+ name: network_two
+ driver_options:
+ com.docker.network.bridge.name: net2
+
+- name: Create a network with custom IPAM config
+ community.docker.docker_network:
+ name: network_three
+ ipam_config:
+ - subnet: 172.23.27.0/24
+ gateway: 172.23.27.2
+ iprange: 172.23.27.0/26
+ aux_addresses:
+ host1: 172.23.27.3
+ host2: 172.23.27.4
+
+- name: Create a network with labels
+ community.docker.docker_network:
+ name: network_four
+ labels:
+ key1: value1
+ key2: value2
+
+- name: Create a network with IPv6 IPAM config
+ community.docker.docker_network:
+ name: network_ipv6_one
+ enable_ipv6: true
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce1::/64
+
+- name: Create a network with IPv6 and custom IPv4 IPAM config
+ community.docker.docker_network:
+ name: network_ipv6_two
+ enable_ipv6: true
+ ipam_config:
+ - subnet: 172.24.27.0/24
+ - subnet: fdd1:ac8c:0557:7ce2::/64
+
+- name: Delete a network, disconnecting all containers
+ community.docker.docker_network:
+ name: network_one
+ state: absent
+ force: true
+'''
+
+RETURN = '''
+network:
+ description:
+ - Network inspection results for the affected network.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import re
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ DifferenceTracker,
+ clean_dict_booleans_for_docker_api,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.name = None
+ self.connected = None
+ self.driver = None
+ self.driver_options = None
+ self.ipam_driver = None
+ self.ipam_driver_options = None
+ self.ipam_config = None
+ self.appends = None
+ self.force = None
+ self.internal = None
+ self.labels = None
+ self.debug = None
+ self.enable_ipv6 = None
+ self.scope = None
+ self.attachable = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+def container_names_in_network(network):
+ return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
+
+
+CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
+CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[0-1][0-9]|12[0-8])$')
+
+
+def validate_cidr(cidr):
+ """Validate CIDR. Return IP version of a CIDR string on success.
+
+ :param cidr: Valid CIDR
+ :type cidr: str
+ :return: ``ipv4`` or ``ipv6``
+ :rtype: str
+ :raises ValueError: If ``cidr`` is not a valid CIDR
+ """
+ if CIDR_IPV4.match(cidr):
+ return 'ipv4'
+ elif CIDR_IPV6.match(cidr):
+ return 'ipv6'
+ raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
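+# Illustration: validate_cidr('192.0.2.0/24') == 'ipv4' and
+# validate_cidr('fdd1:ac8c:0557:7ce1::/64') == 'ipv6', while
+# validate_cidr('192.0.2.0') raises ValueError.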
+
+
+def normalize_ipam_config_key(key):
+ """Normalizes IPAM config keys returned by Docker API to match Ansible keys.
+
+ :param key: Docker API key
+ :type key: str
+    :return: Ansible module key
+    :rtype: str
+ """
+ special_cases = {
+ 'AuxiliaryAddresses': 'aux_addresses'
+ }
+ return special_cases.get(key, key.lower())
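+# For example: normalize_ipam_config_key('Subnet') == 'subnet' and
+# normalize_ipam_config_key('AuxiliaryAddresses') == 'aux_addresses'.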
+
+
+def dicts_are_essentially_equal(a, b):
+ """Make sure that a is a subset of b, where None entries of a are ignored."""
+ for k, v in a.items():
+ if v is None:
+ continue
+ if b.get(k) != v:
+ return False
+ return True
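+# For example, a module-side IPAM block with unset keys matches the active config:
+#   dicts_are_essentially_equal({'subnet': '172.23.27.0/24', 'gateway': None},
+#                               {'subnet': '172.23.27.0/24', 'gateway': '172.23.27.2'}) == True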
+
+
+class DockerNetworkManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_network = self.get_existing_network()
+
+ if not self.parameters.connected and self.existing_network:
+ self.parameters.connected = container_names_in_network(self.existing_network)
+
+ if self.parameters.ipam_config:
+ try:
+ for ipam_config in self.parameters.ipam_config:
+ validate_cidr(ipam_config['subnet'])
+ except ValueError as e:
+ self.client.fail(to_native(e))
+
+ if self.parameters.driver_options:
+ self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_network(self):
+ return self.client.get_network(name=self.parameters.name)
+
+ def has_different_config(self, net):
+ '''
+ Evaluates an existing network and returns a tuple containing a boolean
+ indicating if the configuration is different and a list of differences.
+
+ :param net: the inspection output for an existing network
+ :return: (bool, list)
+ '''
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != net['Driver']:
+ differences.add('driver',
+ parameter=self.parameters.driver,
+ active=net['Driver'])
+ if self.parameters.driver_options:
+ if not net.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=net.get('Options'))
+ else:
+ for key, value in self.parameters.driver_options.items():
+ if not (key in net['Options']) or value != net['Options'][key]:
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=net['Options'].get(key))
+
+ if self.parameters.ipam_driver:
+ if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
+ differences.add('ipam_driver',
+ parameter=self.parameters.ipam_driver,
+ active=net.get('IPAM'))
+
+ if self.parameters.ipam_driver_options is not None:
+ ipam_driver_options = net['IPAM'].get('Options') or {}
+ if ipam_driver_options != self.parameters.ipam_driver_options:
+ differences.add('ipam_driver_options',
+ parameter=self.parameters.ipam_driver_options,
+ active=ipam_driver_options)
+
+        if self.parameters.ipam_config:
+ if not net.get('IPAM') or not net['IPAM']['Config']:
+ differences.add('ipam_config',
+ parameter=self.parameters.ipam_config,
+ active=net.get('IPAM', {}).get('Config'))
+ else:
+ # Put network's IPAM config into the same format as module's IPAM config
+ net_ipam_configs = []
+ for net_ipam_config in net['IPAM']['Config']:
+ config = dict()
+ for k, v in net_ipam_config.items():
+ config[normalize_ipam_config_key(k)] = v
+ net_ipam_configs.append(config)
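+                # e.g. (illustrative) {'Subnet': '172.23.27.0/24', 'AuxiliaryAddresses': {...}}
+                # becomes {'subnet': '172.23.27.0/24', 'aux_addresses': {...}}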
+ # Compare lists of dicts as sets of dicts
+ for idx, ipam_config in enumerate(self.parameters.ipam_config):
+ net_config = dict()
+ for net_ipam_config in net_ipam_configs:
+ if dicts_are_essentially_equal(ipam_config, net_ipam_config):
+ net_config = net_ipam_config
+ break
+ for key, value in ipam_config.items():
+ if value is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value != net_config.get(key):
+ differences.add('ipam_config[%s].%s' % (idx, key),
+ parameter=value,
+ active=net_config.get(key))
+
+ if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
+ differences.add('enable_ipv6',
+ parameter=self.parameters.enable_ipv6,
+ active=net.get('EnableIPv6', False))
+
+ if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
+ differences.add('internal',
+ parameter=self.parameters.internal,
+ active=net.get('Internal'))
+
+ if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
+ differences.add('scope',
+ parameter=self.parameters.scope,
+ active=net.get('Scope'))
+
+ if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
+ differences.add('attachable',
+ parameter=self.parameters.attachable,
+ active=net.get('Attachable'))
+ if self.parameters.labels:
+ if not net.get('Labels'):
+ differences.add('labels',
+ parameter=self.parameters.labels,
+ active=net.get('Labels'))
+ else:
+ for key, value in self.parameters.labels.items():
+ if not (key in net['Labels']) or value != net['Labels'][key]:
+ differences.add('labels.%s' % key,
+ parameter=value,
+ active=net['Labels'].get(key))
+
+ return not differences.empty, differences
+
+ def create_network(self):
+ if not self.existing_network:
+ data = {
+ 'Name': self.parameters.name,
+ 'Driver': self.parameters.driver,
+ 'Options': self.parameters.driver_options,
+ 'IPAM': None,
+ 'CheckDuplicate': None,
+ }
+
+ if self.parameters.enable_ipv6:
+ data['EnableIPv6'] = True
+ if self.parameters.internal:
+ data['Internal'] = True
+ if self.parameters.scope is not None:
+ data['Scope'] = self.parameters.scope
+ if self.parameters.attachable is not None:
+ data['Attachable'] = self.parameters.attachable
+ if self.parameters.labels is not None:
+ data["Labels"] = self.parameters.labels
+
+ ipam_pools = []
+ if self.parameters.ipam_config:
+ for ipam_pool in self.parameters.ipam_config:
+ ipam_pools.append({
+ 'Subnet': ipam_pool['subnet'],
+ 'IPRange': ipam_pool['iprange'],
+ 'Gateway': ipam_pool['gateway'],
+ 'AuxiliaryAddresses': ipam_pool['aux_addresses'],
+ })
+
+ if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
+ # Only add IPAM if a driver was specified or if IPAM parameters were
+ # specified. Leaving this parameter out can significantly speed up
+ # creation; on my machine creation with this option needs ~15 seconds,
+ # and without just a few seconds.
+ data['IPAM'] = {
+ 'Driver': self.parameters.ipam_driver,
+ 'Config': ipam_pools or [],
+ 'Options': self.parameters.ipam_driver_options,
+ }
+
+ if not self.check_mode:
+ resp = self.client.post_json_to_json('/networks/create', data=data)
+ self.client.report_warnings(resp, ['Warning'])
+ self.existing_network = self.client.get_network(network_id=resp['Id'])
+ self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_network(self):
+ if self.existing_network:
+ self.disconnect_all_containers()
+ if not self.check_mode:
+ self.client.delete_call('/networks/{0}', self.parameters.name)
+ self.results['actions'].append("Removed network %s" % (self.parameters.name,))
+ self.results['changed'] = True
+
+ def is_container_connected(self, container_name):
+ if not self.existing_network:
+ return False
+ return container_name in container_names_in_network(self.existing_network)
+
+ def connect_containers(self):
+ for name in self.parameters.connected:
+ if not self.is_container_connected(name):
+ if not self.check_mode:
+ data = {
+ "Container": name,
+ "EndpointConfig": None,
+ }
+ self.client.post_json('/networks/{0}/connect', self.parameters.name, data=data)
+ self.results['actions'].append("Connected container %s" % (name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(name), parameter=True, active=False)
+
+ def disconnect_missing(self):
+ if not self.existing_network:
+ return
+ containers = self.existing_network['Containers']
+ if not containers:
+ return
+ for c in containers.values():
+ name = c['Name']
+ if name not in self.parameters.connected:
+ self.disconnect_container(name)
+
+ def disconnect_all_containers(self):
+ containers = self.client.get_network(name=self.parameters.name)['Containers']
+ if not containers:
+ return
+ for cont in containers.values():
+ self.disconnect_container(cont['Name'])
+
+ def disconnect_container(self, container_name):
+ if not self.check_mode:
+ data = {"Container": container_name}
+ self.client.post_json('/networks/{0}/disconnect', self.parameters.name, data=data)
+ self.results['actions'].append("Disconnected container %s" % (container_name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(container_name),
+ parameter=False,
+ active=True)
+
+ def present(self):
+ different = False
+ differences = DifferenceTracker()
+ if self.existing_network:
+ different, differences = self.has_different_config(self.existing_network)
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
+ if self.parameters.force or different:
+ self.remove_network()
+ self.existing_network = None
+
+ self.create_network()
+ self.connect_containers()
+ if not self.parameters.appends:
+ self.disconnect_missing()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ network_facts = self.get_existing_network()
+ self.results['network'] = network_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
+ self.remove_network()
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['network_name']),
+ connected=dict(type='list', default=[], elements='str', aliases=['containers']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='bridge'),
+ driver_options=dict(type='dict', default={}),
+ force=dict(type='bool', default=False),
+ appends=dict(type='bool', default=False, aliases=['incremental']),
+ ipam_driver=dict(type='str'),
+ ipam_driver_options=dict(type='dict'),
+ ipam_config=dict(type='list', elements='dict', options=dict(
+ subnet=dict(type='str'),
+ iprange=dict(type='str'),
+ gateway=dict(type='str'),
+ aux_addresses=dict(type='dict'),
+ )),
+ enable_ipv6=dict(type='bool'),
+ internal=dict(type='bool'),
+ labels=dict(type='dict', default={}),
+ debug=dict(type='bool', default=False),
+ scope=dict(type='str', choices=['local', 'global', 'swarm']),
+ attachable=dict(type='bool'),
+ )
+
+ option_minimal_versions = dict(
+ scope=dict(docker_api_version='1.30'),
+ attachable=dict(docker_api_version='1.26'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ # "The docker server >= 1.10.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerNetworkManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_network_info.py b/ansible_collections/community/docker/plugins/modules/docker_network_info.py
new file mode 100644
index 000000000..9818baad5
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_network_info.py
@@ -0,0 +1,140 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_network_info
+
+short_description: Retrieves facts about a Docker network
+
+description:
+ - Retrieves facts about a docker network.
+ - Essentially returns the output of C(docker network inspect <name>), similar to what M(community.docker.docker_network)
+ returns for a non-absent network.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - The name of the network to inspect.
+ - When identifying an existing network, the value may be a name or a long or short network ID.
+ type: str
+ required: true
+
+author:
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get infos on network
+ community.docker.docker_network_info:
+ name: mydata
+ register: result
+
+- name: Does network exist?
+ ansible.builtin.debug:
+ msg: "The network {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about network
+ ansible.builtin.debug:
+ var: result.network
+ when: result.exists
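+
+# As noted above, I(name) also accepts a long or short network ID
+# (the short ID below is illustrative only):
+- name: Get info on network by short ID
+  community.docker.docker_network_info:
+    name: 0856968545f2
+  register: result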
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the network exists.
+ type: bool
+ returned: always
+ sample: true
+network:
+ description:
+ - Facts representing the current state of the network. Matches the docker inspection output.
+ - Will be C(none) if the network does not exist.
+ returned: always
+ type: dict
+ sample: {
+ "Attachable": false,
+ "ConfigFrom": {
+ "Network": ""
+ },
+ "ConfigOnly": false,
+ "Containers": {},
+ "Created": "2018-12-07T01:47:51.250835114-06:00",
+ "Driver": "bridge",
+ "EnableIPv6": false,
+ "IPAM": {
+ "Config": [
+ {
+ "Gateway": "192.168.96.1",
+ "Subnet": "192.168.96.0/20"
+ }
+ ],
+ "Driver": "default",
+ "Options": null
+ },
+ "Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a",
+ "Ingress": false,
+ "Internal": false,
+ "Labels": {},
+ "Name": "ansible-test-f2700bba",
+ "Options": {},
+ "Scope": "local"
+ }
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ network = client.get_network(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=bool(network),
+ network=network,
+ )
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_node.py b/ansible_collections/community/docker/plugins/modules/docker_node.py
new file mode 100644
index 000000000..d097b07f7
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_node.py
@@ -0,0 +1,306 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_node
+short_description: Manage Docker Swarm node
+description:
+ - Manages Docker nodes via a Swarm Manager.
+ - This module allows changing the node's role and availability, and modifying, adding or removing node labels.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ hostname:
+ description:
+ - The hostname or ID of the node as registered in Swarm.
+ - If more than one node is registered using the same hostname, the ID must be used,
+ otherwise the module will fail.
+ type: str
+ required: true
+ labels:
+ description:
+ - User-defined key/value metadata that will be assigned as a node attribute.
+ - Label operations in this module apply to the docker swarm node specified by I(hostname).
+ Use the M(community.docker.docker_swarm) module to add/modify/remove swarm cluster labels.
+ - The actual state of labels assigned to the node when the module completes its work depends on
+ the I(labels_state) and I(labels_to_remove) parameter values. See the descriptions below.
+ type: dict
+ labels_state:
+ description:
+ - Defines the operation on the labels assigned to the node and the labels specified in the I(labels) option.
+ - Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node.
+ If no labels are assigned then it will add listed labels. For labels that are already assigned
+ to the node, it will update their values. The labels not specified in I(labels) will remain unchanged.
+ If I(labels) is empty then no changes will be made.
+ - Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then
+ all labels assigned to the node will be removed.
+ type: str
+ default: 'merge'
+ choices:
+ - merge
+ - replace
+ labels_to_remove:
+ description:
+ - List of labels that will be removed from the node configuration. The list has to contain only label
+ names, not their values.
+ - If a label provided in the list is not assigned to the node, the entry is ignored.
+ - If a label is present in both I(labels_to_remove) and I(labels), then the value provided in I(labels) remains
+ assigned to the node.
+ - If I(labels_state) is C(replace) and I(labels) is not provided or empty then all labels assigned to
+ node are removed and I(labels_to_remove) is ignored.
+ type: list
+ elements: str
+ availability:
+ description: Node availability to assign. If not provided then node availability remains unchanged.
+ choices:
+ - active
+ - pause
+ - drain
+ type: str
+ role:
+ description: Node role to assign. If not provided then node role remains unchanged.
+ choices:
+ - manager
+ - worker
+ type: str
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - Docker API >= 1.25
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+ - Thierry Bouvet (@tbouvet)
+
+'''
+
+EXAMPLES = '''
+- name: Set node role
+ community.docker.docker_node:
+ hostname: mynode
+ role: manager
+
+- name: Set node availability
+ community.docker.docker_node:
+ hostname: mynode
+ availability: drain
+
+- name: Replace node labels with new labels
+ community.docker.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+ labels_state: replace
+
+- name: Merge node labels and new labels
+ community.docker.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+
+- name: Remove all labels assigned to node
+ community.docker.docker_node:
+ hostname: mynode
+ labels_state: replace
+
+- name: Remove selected labels from the node
+ community.docker.docker_node:
+ hostname: mynode
+ labels_to_remove:
+ - key1
+ - key2
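+
+# As described in the option documentation, a label listed in both
+# 'labels' and 'labels_to_remove' keeps the value given in 'labels':
+- name: Merge one label and remove another in a single task
+  community.docker.docker_node:
+    hostname: mynode
+    labels:
+      keep: value
+    labels_to_remove:
+      - obsolete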
+'''
+
+RETURN = '''
+node:
+ description: Information about the node after the 'update' operation.
+ returned: success
+ type: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ DockerBaseClass,
+ RequestException,
+)
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+
+ # Spec
+ self.name = None
+ self.labels = None
+ self.labels_state = None
+ self.labels_to_remove = None
+
+ # Node
+ self.availability = None
+ self.role = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+class SwarmNodeManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmNodeManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.parameters = TaskParameters(client)
+
+ self.node_update()
+
+ def node_update(self):
+ if not self.client.check_if_swarm_node(node_id=self.parameters.hostname):
+ self.client.fail("This node is not part of a swarm.")
+ return
+
+ if self.client.check_if_swarm_node_is_down():
+ self.client.fail("Can not update the node. The node is down.")
+
+ try:
+ node_info = self.client.inspect_node(node_id=self.parameters.hostname)
+ except APIError as exc:
+ self.client.fail("Failed to get node information for %s" % to_native(exc))
+
+ changed = False
+ node_spec = dict(
+ Availability=self.parameters.availability,
+ Role=self.parameters.role,
+ Labels=self.parameters.labels,
+ )
+
+ if self.parameters.role is None:
+ node_spec['Role'] = node_info['Spec']['Role']
+ else:
+ if node_info['Spec']['Role'] != self.parameters.role:
+ node_spec['Role'] = self.parameters.role
+ changed = True
+
+ if self.parameters.availability is None:
+ node_spec['Availability'] = node_info['Spec']['Availability']
+ else:
+ if node_info['Spec']['Availability'] != self.parameters.availability:
+ node_spec['Availability'] = self.parameters.availability
+ changed = True
+
+ if self.parameters.labels_state == 'replace':
+ if self.parameters.labels is None:
+ node_spec['Labels'] = {}
+ if node_info['Spec']['Labels']:
+ changed = True
+ else:
+ if (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
+ node_spec['Labels'] = self.parameters.labels
+ changed = True
+ elif self.parameters.labels_state == 'merge':
+ node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
+ if self.parameters.labels is not None:
+ for key, value in self.parameters.labels.items():
+ if node_spec['Labels'].get(key) != value:
+ node_spec['Labels'][key] = value
+ changed = True
+
+ if self.parameters.labels_to_remove is not None:
+ for key in self.parameters.labels_to_remove:
+ if self.parameters.labels is not None:
+ if not self.parameters.labels.get(key):
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+ else:
+ self.client.module.warn(
+ "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
+ "Keeping the assigned label value."
+ % to_native(key))
+ else:
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+
+ if changed:
+ if not self.check_mode:
+ try:
+ self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
+ node_spec=node_spec)
+ except APIError as exc:
+ self.client.fail("Failed to update node : %s" % to_native(exc))
+ self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
+ self.results['changed'] = changed
+ else:
+ self.results['node'] = node_info
+ self.results['changed'] = changed
+
+
+def main():
+ argument_spec = dict(
+ hostname=dict(type='str', required=True),
+ labels=dict(type='dict'),
+ labels_state=dict(type='str', default='merge', choices=['merge', 'replace']),
+ labels_to_remove=dict(type='list', elements='str'),
+ availability=dict(type='str', choices=['active', 'pause', 'drain']),
+ role=dict(type='str', choices=['worker', 'manager']),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ SwarmNodeManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_node_info.py b/ansible_collections/community/docker/plugins/modules/docker_node_info.py
new file mode 100644
index 000000000..d943db31b
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_node_info.py
@@ -0,0 +1,162 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_node_info
+
+short_description: Retrieves facts about a Docker Swarm node from the Swarm Manager
+
+description:
+ - Retrieves facts about a docker node.
+ - Essentially returns the output of C(docker node inspect <name>).
+ - Must be executed on a host running as a Swarm Manager, otherwise the module will fail.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - The name of a node to inspect, or a list of node names to inspect.
+ - If empty then return information on all nodes in the Swarm cluster.
+ - When identifying a node use either the hostname of the node (as registered in Swarm) or the node ID.
+ - If I(self) is C(true) then this parameter is ignored.
+ type: list
+ elements: str
+ self:
+ description:
+ - If C(true), queries the node (that is, the docker daemon) the module communicates with.
+ - If C(true) then I(name) is ignored.
+ - If C(false) then query depends on I(name) presence and value.
+ type: bool
+ default: false
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get info on all nodes
+ community.docker.docker_node_info:
+ register: result
+
+- name: Get info on node
+ community.docker.docker_node_info:
+ name: mynode
+ register: result
+
+- name: Get info on list of nodes
+ community.docker.docker_node_info:
+ name:
+ - mynode1
+ - mynode2
+ register: result
+
+- name: Get info on host if it is Swarm Manager
+ community.docker.docker_node_info:
+ self: true
+ register: result
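+
+# The returned entries match C(docker node inspect) output; for example,
+# the hostnames of all nodes can be extracted from the first task above:
+- name: Print hostnames of all swarm nodes
+  ansible.builtin.debug:
+    msg: "{{ result.nodes | map(attribute='Description.Hostname') | list }}"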
+'''
+
+RETURN = '''
+nodes:
+ description:
+ - Facts representing the current state of the nodes. Matches the C(docker node inspect) output.
+ - Can contain multiple entries if more than one node is provided in I(name), or if I(name) is not provided.
+ - If I(name) contains a list of nodes, the output will provide information on all nodes registered
+ at the swarm, including nodes that left the swarm but have not been removed from the cluster on swarm
+ managers, and nodes that are unreachable.
+ returned: always
+ type: list
+ elements: dict
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_node_facts(client):
+
+ results = []
+
+ if client.module.params['self'] is True:
+ self_node_id = client.get_swarm_node_id()
+ node_info = client.get_node_inspect(node_id=self_node_id)
+ results.append(node_info)
+ return results
+
+ if client.module.params['name'] is None:
+ node_info = client.get_all_nodes_inspect()
+ return node_info
+
+ nodes = client.module.params['name']
+ if not isinstance(nodes, list):
+ nodes = [nodes]
+
+ for next_node_name in nodes:
+ next_node_info = client.get_node_inspect(node_id=next_node_name, skip_missing=True)
+ if next_node_info:
+ results.append(next_node_info)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ self=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ nodes = get_node_facts(client)
+
+ client.module.exit_json(
+ changed=False,
+ nodes=nodes,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_plugin.py b/ansible_collections/community/docker/plugins/modules/docker_plugin.py
new file mode 100644
index 000000000..9bb850665
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_plugin.py
@@ -0,0 +1,392 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright (c) 2021 Red Hat | Ansible Sakar Mehra<@sakarmehra100@gmail.com | @sakar97>
+# Copyright (c) 2019, Vladimir Porshkevich (@porshkevich) <neosonic@mail.ru>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_plugin
+short_description: Manage Docker plugins
+version_added: 1.3.0
+description:
+ - This module allows installing, removing, enabling and disabling Docker plugins.
+ - Performs largely the same function as the C(docker plugin) CLI subcommand.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ plugin_name:
+ description:
+ - Name of the plugin to operate on.
+ required: true
+ type: str
+
+ state:
+ description:
+ - C(absent) removes the plugin.
+ - C(present) installs the plugin, if it does not already exist.
+ - C(enable) enables the plugin.
+ - C(disable) disables the plugin.
+ default: present
+ choices:
+ - absent
+ - present
+ - enable
+ - disable
+ type: str
+
+ alias:
+ description:
+ - Local name for the plugin.
+ type: str
+ version_added: 1.8.0
+
+ plugin_options:
+ description:
+ - Dictionary of plugin settings.
+ type: dict
+ default: {}
+
+ force_remove:
+ description:
+ - Remove even if the plugin is enabled.
+ default: false
+ type: bool
+
+ enable_timeout:
+ description:
+ - Timeout in seconds for enabling the plugin.
+ type: int
+ default: 0
+
+author:
+ - Sakar Mehra (@sakar97)
+ - Vladimir Porshkevich (@porshkevich)
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Install a plugin
+ community.docker.docker_plugin:
+ plugin_name: plugin_one
+ state: present
+
+- name: Remove a plugin
+ community.docker.docker_plugin:
+ plugin_name: plugin_one
+ state: absent
+
+- name: Enable the plugin
+ community.docker.docker_plugin:
+ plugin_name: plugin_one
+ state: enable
+
+- name: Disable the plugin
+ community.docker.docker_plugin:
+ plugin_name: plugin_one
+ state: disable
+
+- name: Install a plugin with options
+ community.docker.docker_plugin:
+ plugin_name: weaveworks/net-plugin:latest_release
+ plugin_options:
+ IPALLOC_RANGE: "10.32.0.0/12"
+ WEAVE_PASSWORD: "PASSWORD"
+'''
+
+RETURN = '''
+plugin:
+ description:
+ - Plugin inspection results for the affected plugin.
+ returned: success
+ type: dict
+ sample: {}
+actions:
+ description:
+ - List of actions performed during task execution.
+ returned: when I(state!=absent)
+ type: list
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ DifferenceTracker,
+)
+
+from ansible_collections.community.docker.plugins.module_utils._api import auth
+from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException, NotFound
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+ self.plugin_name = None
+ self.alias = None
+ self.plugin_options = None
+ self.debug = None
+ self.force_remove = None
+ self.enable_timeout = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+def prepare_options(options):
+ return ['%s=%s' % (k, v if v is not None else "") for k, v in options.items()] if options else []
+
+
+def parse_options(options_list):
+ return dict(x.split('=', 1) for x in options_list) if options_list else {}
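+
+# Round-trip sketch of the two helpers above (illustrative values):
+#   prepare_options({'DEBUG': '1', 'FLAG': None}) == ['DEBUG=1', 'FLAG=']
+#   parse_options(['DEBUG=1', 'FLAG=']) == {'DEBUG': '1', 'FLAG': ''}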
+
+
+class DockerPluginManager(object):
+
+ def __init__(self, client):
+ self.client = client
+
+ self.parameters = TaskParameters(client)
+ self.preferred_name = self.parameters.alias or self.parameters.plugin_name
+ self.check_mode = self.client.check_mode
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.actions = []
+ self.changed = False
+
+ self.existing_plugin = self.get_existing_plugin()
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+ elif state == 'enable':
+ self.enable()
+ elif state == 'disable':
+ self.disable()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.diff = self.diff_result
+
+ def get_existing_plugin(self):
+ try:
+ return self.client.get_json('/plugins/{0}/json', self.preferred_name)
+ except NotFound:
+ return None
+ except APIError as e:
+ self.client.fail(to_native(e))
+
+ def has_different_config(self):
+ """
+ Return the list of differences between the current parameters and the existing plugin.
+
+ :return: list of options that differ
+ """
+ differences = DifferenceTracker()
+ if self.parameters.plugin_options:
+ settings = self.existing_plugin.get('Settings')
+ if not settings:
+ differences.add('plugin_options', parameter=self.parameters.plugin_options, active=settings)
+ else:
+ existing_options = parse_options(settings.get('Env'))
+
+ for key, value in self.parameters.plugin_options.items():
+ if existing_options.get(key) != value:
+ differences.add('plugin_options.%s' % key,
+ parameter=value,
+ active=existing_options.get(key))
+
+ return differences
+
+ def install_plugin(self):
+ if not self.existing_plugin:
+ if not self.check_mode:
+ try:
+ # Get privileges
+ headers = {}
+ registry, repo_name = auth.resolve_repository_name(self.parameters.plugin_name)
+ header = auth.get_config_header(self.client, registry)
+ if header:
+ headers['X-Registry-Auth'] = header
+ privileges = self.client.get_json('/plugins/privileges', params={'remote': self.parameters.plugin_name}, headers=headers)
+ # Pull plugin
+ params = {
+ 'remote': self.parameters.plugin_name,
+ }
+ if self.parameters.alias:
+ params['name'] = self.parameters.alias
+ response = self.client._post_json(self.client._url('/plugins/pull'), params=params, headers=headers, data=privileges, stream=True)
+ self.client._raise_for_status(response)
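+ # Iterating the stream consumes the pull progress output, so the
+ # plugin is fully pulled before it is inspected below.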
+ for data in self.client._stream_helper(response, decode=True):
+ pass
+ # Inspect and configure plugin
+ self.existing_plugin = self.client.get_json('/plugins/{0}/json', self.preferred_name)
+ if self.parameters.plugin_options:
+ data = prepare_options(self.parameters.plugin_options)
+ self.client.post_json('/plugins/{0}/set', self.preferred_name, data=data)
+ except APIError as e:
+ self.client.fail(to_native(e))
+
+ self.actions.append("Installed plugin %s" % self.preferred_name)
+ self.changed = True
+
+ def remove_plugin(self):
+ force = self.parameters.force_remove
+ if self.existing_plugin:
+ if not self.check_mode:
+ try:
+ self.client.delete_call('/plugins/{0}', self.preferred_name, params={'force': force})
+ except APIError as e:
+ self.client.fail(to_native(e))
+
+ self.actions.append("Removed plugin %s" % self.preferred_name)
+ self.changed = True
+
+ def update_plugin(self):
+ if self.existing_plugin:
+ differences = self.has_different_config()
+ if not differences.empty:
+ if not self.check_mode:
+ try:
+ data = prepare_options(self.parameters.plugin_options)
+ self.client.post_json('/plugins/{0}/set', self.preferred_name, data=data)
+ except APIError as e:
+ self.client.fail(to_native(e))
+ self.actions.append("Updated plugin %s settings" % self.preferred_name)
+ self.changed = True
+ else:
+ self.client.fail("Cannot update the plugin: Plugin does not exist")
+
+ def present(self):
+ differences = DifferenceTracker()
+ if self.existing_plugin:
+ differences = self.has_different_config()
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_plugin is not None)
+
+ if self.existing_plugin:
+ self.update_plugin()
+ else:
+ self.install_plugin()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.actions = None
+
+ def absent(self):
+ self.remove_plugin()
+
+ def enable(self):
+ timeout = self.parameters.enable_timeout
+ if self.existing_plugin:
+ if not self.existing_plugin.get('Enabled'):
+ if not self.check_mode:
+ try:
+ self.client.post_json('/plugins/{0}/enable', self.preferred_name, params={'timeout': timeout})
+ except APIError as e:
+ self.client.fail(to_native(e))
+ self.actions.append("Enabled plugin %s" % self.preferred_name)
+ self.changed = True
+ else:
+ self.install_plugin()
+ if not self.check_mode:
+ try:
+ self.client.post_json('/plugins/{0}/enable', self.preferred_name, params={'timeout': timeout})
+ except APIError as e:
+ self.client.fail(to_native(e))
+ self.actions.append("Enabled plugin %s" % self.preferred_name)
+ self.changed = True
+
+ def disable(self):
+ if self.existing_plugin:
+ if self.existing_plugin.get('Enabled'):
+ if not self.check_mode:
+ try:
+ self.client.post_json('/plugins/{0}/disable', self.preferred_name)
+ except APIError as e:
+ self.client.fail(to_native(e))
+ self.actions.append("Disable plugin %s" % self.preferred_name)
+ self.changed = True
+ else:
+ self.client.fail("Plugin not found: Plugin does not exist.")
+
+ @property
+ def result(self):
+ plugin_data = {}
+ if self.parameters.state != 'absent':
+ try:
+ plugin_data = self.client.get_json('/plugins/{0}/json', self.preferred_name)
+ except NotFound:
+ # This can happen in check mode
+ pass
+ result = {
+ 'actions': self.actions,
+ 'changed': self.changed,
+ 'diff': self.diff,
+ 'plugin': plugin_data,
+ }
+ return dict((k, v) for k, v in result.items() if v is not None)
+
+
+def main():
+ argument_spec = dict(
+ alias=dict(type='str'),
+ plugin_name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['present', 'absent', 'enable', 'disable']),
+ plugin_options=dict(type='dict', default={}),
+ debug=dict(type='bool', default=False),
+ force_remove=dict(type='bool', default=False),
+ enable_timeout=dict(type='int', default=0),
+ )
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ cm = DockerPluginManager(client)
+ client.module.exit_json(**cm.result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_prune.py b/ansible_collections/community/docker/plugins/modules/docker_prune.py
new file mode 100644
index 000000000..1557f85a4
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_prune.py
@@ -0,0 +1,275 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_prune
+
+short_description: Prune various Docker objects
+
+description:
+ - Allows running C(docker container prune), C(docker image prune), C(docker network prune)
+ and C(docker volume prune) via the Docker API.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+
+options:
+ containers:
+ description:
+ - Whether to prune containers.
+ type: bool
+ default: false
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to delete.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to prune images.
+ type: bool
+ default: false
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to delete.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to prune networks.
+ type: bool
+ default: false
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to prune volumes.
+ type: bool
+ default: false
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ builder_cache:
+ description:
+ - Whether to prune the builder cache.
+ type: bool
+ default: false
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+notes:
+ - The module always returned C(changed=false) before community.docker 3.5.1.
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Prune containers older than 24h
+ community.docker.docker_prune:
+ containers: true
+ containers_filters:
+ # only consider containers created more than 24 hours ago
+ until: 24h
+
+- name: Prune everything
+ community.docker.docker_prune:
+ containers: true
+ images: true
+ networks: true
+ volumes: true
+ builder_cache: true
+
+- name: Prune everything (including non-dangling images)
+ community.docker.docker_prune:
+ containers: true
+ images: true
+ images_filters:
+ dangling: false
+ networks: true
+ volumes: true
+ builder_cache: true
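+
+# The Docker documentation linked above describes further filters, such as
+# pruning by label (the label below is illustrative only):
+- name: Prune stopped containers carrying a specific label
+  community.docker.docker_prune:
+    containers: true
+    containers_filters:
+      label: deprecated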
+'''
+
+RETURN = '''
+# containers
+containers:
+ description:
+ - List of IDs of deleted containers.
+ returned: I(containers) is C(true)
+ type: list
+ elements: str
+ sample: []
+containers_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from container pruning in bytes.
+ returned: I(containers) is C(true)
+ type: int
+ sample: 0
+
+# images
+images:
+ description:
+ - List of IDs of deleted images.
+ returned: I(images) is C(true)
+ type: list
+ elements: str
+ sample: []
+images_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from image pruning in bytes.
+ returned: I(images) is C(true)
+ type: int
+ sample: 0
+
+# networks
+networks:
+ description:
+ - List of IDs of deleted networks.
+ returned: I(networks) is C(true)
+ type: list
+ elements: str
+ sample: []
+
+# volumes
+volumes:
+ description:
+ - List of IDs of deleted volumes.
+ returned: I(volumes) is C(true)
+ type: list
+ elements: str
+ sample: []
+volumes_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from volumes pruning in bytes.
+ returned: I(volumes) is C(true)
+ type: int
+ sample: 0
+
+# builder_cache
+builder_cache_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from builder cache pruning in bytes.
+ returned: I(builder_cache) is C(true)
+ type: int
+ sample: 0
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import clean_dict_booleans_for_docker_api
+
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException
+from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import convert_filters
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ builder_cache=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ # supports_check_mode=True,
+ )
+
+ try:
+ result = dict()
+ changed = False
+
+ if client.module.params['containers']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
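+ # convert_filters() JSON-encodes the filter map the way the API expects,
+ # for example {'until': '24h'} becomes '{"until": ["24h"]}'.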
+ params = {'filters': convert_filters(filters)}
+ res = client.post_to_json('/containers/prune', params=params)
+ result['containers'] = res.get('ContainersDeleted') or []
+ result['containers_space_reclaimed'] = res['SpaceReclaimed']
+ if result['containers'] or result['containers_space_reclaimed']:
+ changed = True
+
+ if client.module.params['images']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
+ params = {'filters': convert_filters(filters)}
+ res = client.post_to_json('/images/prune', params=params)
+ result['images'] = res.get('ImagesDeleted') or []
+ result['images_space_reclaimed'] = res['SpaceReclaimed']
+ if result['images'] or result['images_space_reclaimed']:
+ changed = True
+
+ if client.module.params['networks']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
+ params = {'filters': convert_filters(filters)}
+ res = client.post_to_json('/networks/prune', params=params)
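+ # The /networks/prune endpoint only reports the deleted networks;
+ # it does not return a SpaceReclaimed value.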
+ result['networks'] = res.get('NetworksDeleted') or []
+ if result['networks']:
+ changed = True
+
+ if client.module.params['volumes']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
+ params = {'filters': convert_filters(filters)}
+ res = client.post_to_json('/volumes/prune', params=params)
+ result['volumes'] = res.get('VolumesDeleted') or []
+ result['volumes_space_reclaimed'] = res['SpaceReclaimed']
+ if result['volumes'] or result['volumes_space_reclaimed']:
+ changed = True
+
+ if client.module.params['builder_cache']:
+ res = client.post_to_json('/build/prune')
+ result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']
+ if result['builder_cache_space_reclaimed']:
+ changed = True
+
+ result['changed'] = changed
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_secret.py b/ansible_collections/community/docker/plugins/modules/docker_secret.py
new file mode 100644
index 000000000..546756a49
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_secret.py
@@ -0,0 +1,406 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_secret
+
+short_description: Manage docker secrets
+
+description:
+ - Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
+ - Adds to the metadata of new secrets an C(ansible_key) label containing a hash of the data, which is then used
+ in future runs to test if a secret has changed. If C(ansible_key) is not present, then a secret will not be updated
+ unless the I(force) option is set.
+ - Updates to secrets are performed by removing the secret and creating it again.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_2_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: none
+
+options:
+ data:
+ description:
+ - The value of the secret.
+ - Mutually exclusive with I(data_src). One of I(data) and I(data_src) is required if I(state=present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: false
+ data_src:
+ description:
+ - The file on the target from which to read the secret.
+ - Mutually exclusive with I(data). One of I(data) and I(data_src) is required if I(state=present).
+ type: path
+ version_added: 1.10.0
+ labels:
+ description:
+ - "A map of key:value meta data, where both key and value are expected to be strings."
+ - If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing secret.
+ - If C(true), an existing secret will be replaced, even if it has not changed.
+ type: bool
+ default: false
+ rolling_versions:
+ description:
+ - If set to C(true), secrets are created with an increasing version number appended to their name.
+ - Adds a label containing the version number to the managed secrets with the name C(ansible_version).
+ type: bool
+ default: false
+ version_added: 2.2.0
+ versions_to_keep:
+ description:
+ - When using I(rolling_versions), the number of old versions of the secret to keep.
+ - Extraneous old secrets are deleted after the new one is created.
+ - Set to C(-1) to keep everything or to C(0) or C(1) to keep only the current one.
+ type: int
+ default: 5
+ version_added: 2.2.0
+ name:
+ description:
+ - The name of the secret.
+ type: str
+ required: true
+ state:
+ description:
+ - Set to C(present), if the secret should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Create secret foo (from a file on the control machine)
+ community.docker.docker_secret:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+ # file directly after reading it prevents this to happen.
+ data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Create secret foo (from a file on the target machine)
+ community.docker.docker_secret:
+ name: foo
+ data_src: /path/to/secret/file
+ state: present
+
+- name: Change the secret data
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the secret
+ two: '2'
+ state: present
+
+- name: No change
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing secret
+ state: present
+
+- name: Update an existing label
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the secret
+ one: '1'
+ state: present
+
+- name: Force the removal/creation of the secret
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ force: true
+ state: present
+
+- name: Remove secret foo
+ community.docker.docker_secret:
+ name: foo
+ state: absent
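+
+# With rolling_versions, secrets are created as foo_v1, foo_v2, and so on,
+# and versions beyond versions_to_keep are deleted after each change:
+- name: Create a rolling-versioned secret
+  community.docker.docker_secret:
+    name: foo
+    data: Goodnight everyone!
+    rolling_versions: true
+    versions_to_keep: 2
+    state: present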
+'''
+
+RETURN = '''
+secret_id:
+ description:
+ - The ID assigned by Docker to the secret object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+secret_name:
+ description:
+ - The name of the created secret object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'awesome_secret'
+ version_added: 2.2.0
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ compare_generic,
+)
+from ansible.module_utils.common.text.converters import to_native, to_bytes
+
+
+class SecretManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SecretManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ data_src = parameters.get('data_src')
+ if data_src is not None:
+ try:
+ with open(data_src, 'rb') as f:
+ self.data = f.read()
+ except Exception as exc:
+ self.client.fail('Error while reading {src}: {error}'.format(src=data_src, error=to_native(exc)))
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.rolling_versions = parameters.get('rolling_versions')
+ self.versions_to_keep = parameters.get('versions_to_keep')
+
+ if self.rolling_versions:
+ self.version = 0
+ self.data_key = None
+ self.secrets = []
+
+ def __call__(self):
+ self.get_secret()
+ if self.state == 'present':
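+ # The SHA-224 hex digest of the data is stored in the 'ansible_key'
+ # label and compared on later runs to detect changes.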
+ self.data_key = hashlib.sha224(self.data).hexdigest()
+ self.present()
+ self.remove_old_versions()
+ elif self.state == 'absent':
+ self.absent()
+
+ def get_version(self, secret):
+ try:
+ return int(secret.get('Spec', {}).get('Labels', {}).get('ansible_version', 0))
+ except ValueError:
+ return 0
+
+ def remove_old_versions(self):
+ if not self.rolling_versions or self.versions_to_keep < 0:
+ return
+ if not self.check_mode:
+ while len(self.secrets) > max(self.versions_to_keep, 1):
+ self.remove_secret(self.secrets.pop(0))
+
+ def get_secret(self):
+ ''' Find an existing secret. '''
+ try:
+ secrets = self.client.secrets(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
+
+ if self.rolling_versions:
+ self.secrets = [
+ secret
+ for secret in secrets
+ if secret['Spec']['Name'].startswith('{name}_v'.format(name=self.name))
+ ]
+ self.secrets.sort(key=self.get_version)
+ else:
+ self.secrets = [
+ secret for secret in secrets if secret['Spec']['Name'] == self.name
+ ]
+
+ def create_secret(self):
+ ''' Create a new secret '''
+ secret_id = None
+ # We can't see the data after creation, so adding a label we can use for idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.rolling_versions:
+ self.version += 1
+ labels['ansible_version'] = str(self.version)
+ self.name = '{name}_v{version}'.format(name=self.name, version=self.version)
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ secret_id = self.client.create_secret(self.name, self.data, labels=labels)
+ self.secrets += self.client.secrets(filters={'id': secret_id})
+ except APIError as exc:
+ self.client.fail("Error creating secret: %s" % to_native(exc))
+
+ if isinstance(secret_id, dict):
+ secret_id = secret_id['ID']
+
+ return secret_id
+
+ def remove_secret(self, secret):
+ try:
+ if not self.check_mode:
+ self.client.remove_secret(secret['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing secret %s: %s" % (secret['Spec']['Name'], to_native(exc)))
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the secret '''
+ if self.secrets:
+ secret = self.secrets[-1]
+ self.results['secret_id'] = secret['ID']
+ self.results['secret_name'] = secret['Spec']['Name']
+ data_changed = False
+ attrs = secret.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ else:
+ if not self.force:
+ self.client.module.warn("'ansible_key' label not found. Secret will not be changed unless the force parameter is set to 'true'")
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if self.rolling_versions:
+ self.version = self.get_version(secret)
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the secret
+ if not self.rolling_versions:
+ self.absent()
+ secret_id = self.create_secret()
+ self.results['changed'] = True
+ self.results['secret_id'] = secret_id
+ self.results['secret_name'] = self.name
+ else:
+ self.results['changed'] = True
+ self.results['secret_id'] = self.create_secret()
+ self.results['secret_name'] = self.name
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the secret '''
+ if self.secrets:
+ for secret in self.secrets:
+ self.remove_secret(secret)
+ self.results['changed'] = True
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str', no_log=True),
+ data_is_b64=dict(type='bool', default=False),
+ data_src=dict(type='path'),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False),
+ rolling_versions=dict(type='bool', default=False),
+ versions_to_keep=dict(type='int', default=5),
+ )
+
+ required_if = [
+ ('state', 'present', ['data', 'data_src'], True),
+ ]
+
+ mutually_exclusive = [
+ ('data', 'data_src'),
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ mutually_exclusive=mutually_exclusive,
+ min_docker_version='2.1.0',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ secret_id='',
+ secret_name=''
+ )
+
+ SecretManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_stack.py b/ansible_collections/community/docker/plugins/modules/docker_stack.py
new file mode 100644
index 000000000..98f4c3ad9
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_stack.py
@@ -0,0 +1,309 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_stack
+author: "Dario Zanzico (@dariko)"
+short_description: Manage docker stacks
+description:
+ - Manage docker stacks using the C(docker stack) command
+ on the target node (see examples).
+extends_documentation_fragment:
+ - community.docker.attributes
+attributes:
+ check_mode:
+ support: none
+ diff_mode:
+ support: none
+options:
+ name:
+ description:
+ - Stack name.
+ type: str
+ required: true
+ state:
+ description:
+ - State of the stack.
+ type: str
+ default: "present"
+ choices:
+ - present
+ - absent
+ compose:
+ description:
+ - List of compose definitions. Any element may be a string
+ referring to the path of the compose file on the target host
+ or the YAML contents of a compose file nested as dictionary.
+ type: list
+ elements: raw
+ default: []
+ prune:
+ description:
+ - If true, will add the C(--prune) option to the C(docker stack deploy) command.
+ This will have docker remove the services not present in the
+ current stack definition.
+ type: bool
+ default: false
+ with_registry_auth:
+ description:
+ - If true, will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
+ This will have docker send registry authentication details to Swarm agents.
+ type: bool
+ default: false
+ resolve_image:
+ description:
+ - If set, will add the C(--resolve-image) option to the C(docker stack deploy) command.
+ This will have docker query the registry to resolve image digests and
+ supported platforms. If not set, docker uses C(always) by default.
+ type: str
+ choices: ["always", "changed", "never"]
+ absent_retries:
+ description:
+ - If C(>0) and I(state) is C(absent), the module will retry up to
+ I(absent_retries) times to delete the stack until all the
+ resources have been effectively deleted.
+ If the last try still reports the stack as not completely
+ removed, the module will fail.
+ type: int
+ default: 0
+ absent_retries_interval:
+ description:
+ - Interval in seconds between consecutive I(absent_retries).
+ type: int
+ default: 1
+
+requirements:
+ - jsondiff
+ - pyyaml
+'''
+
+RETURN = '''
+stack_spec_diff:
+ description: |
+ Dictionary containing the differences between the 'Spec' field
+ of the stack services before and after applying the new stack
+ definition.
+ sample: >
+ "stack_spec_diff":
+ {'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
+ returned: on change
+ type: dict
+'''
+
+EXAMPLES = '''
+ - name: Deploy stack from a compose file
+ community.docker.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+
+ - name: Deploy stack from base compose file and override the web service
+ community.docker.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+ - version: '3'
+ services:
+ web:
+ image: nginx:latest
+ environment:
+ ENVVAR: envvar
+
+ - name: Remove stack
+ community.docker.docker_stack:
+ name: mystack
+ state: absent
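+
+ - name: Remove stack, retrying while resources are still being deleted
+   community.docker.docker_stack:
+     name: mystack
+     state: absent
+     absent_retries: 5
+     absent_retries_interval: 2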
+'''
+
+
+import json
+import tempfile
+from ansible.module_utils.six import string_types
+from time import sleep
+
+try:
+ from jsondiff import diff as json_diff
+ HAS_JSONDIFF = True
+except ImportError:
+ HAS_JSONDIFF = False
+
+try:
+ from yaml import dump as yaml_dump
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, os
+
+
+def docker_stack_services(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "stack",
+ "services",
+ stack_name,
+ "--format",
+ "{{.Name}}"])
+ if err == "Nothing found in stack: %s\n" % stack_name:
+ return []
+ return out.strip().split('\n')
+
+
+def docker_service_inspect(module, service_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "service",
+ "inspect",
+ service_name])
+ if rc != 0:
+ return None
+ else:
+ ret = json.loads(out)[0]['Spec']
+ return ret
+
+
+def docker_stack_deploy(module, stack_name, compose_files):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "deploy"]
+ if module.params["prune"]:
+ command += ["--prune"]
+ if module.params["with_registry_auth"]:
+ command += ["--with-registry-auth"]
+ if module.params["resolve_image"]:
+ command += ["--resolve-image",
+ module.params["resolve_image"]]
+ for compose_file in compose_files:
+ command += ["--compose-file",
+ compose_file]
+ command += [stack_name]
+ return module.run_command(command)
+
+
+def docker_stack_inspect(module, stack_name):
+ ret = {}
+ for service_name in docker_stack_services(module, stack_name):
+ ret[service_name] = docker_service_inspect(module, service_name)
+ return ret
+
+
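+# Run 'docker stack rm', retrying until Docker reports the stack as gone or
+# the retry budget is exhausted.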
+def docker_stack_rm(module, stack_name, retries, interval):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "rm", stack_name]
+
+ rc, out, err = module.run_command(command)
+
+ while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
+ sleep(interval)
+ retries = retries - 1
+ rc, out, err = module.run_command(command)
+ return rc, out, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'compose': dict(type='list', elements='raw', default=[]),
+ 'prune': dict(type='bool', default=False),
+ 'with_registry_auth': dict(type='bool', default=False),
+ 'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
+ 'state': dict(type='str', default='present', choices=['present', 'absent']),
+ 'absent_retries': dict(type='int', default=0),
+ 'absent_retries_interval': dict(type='int', default=1)
+ },
+ supports_check_mode=False
+ )
+
+ if not HAS_JSONDIFF:
+ return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
+
+ if not HAS_YAML:
+ return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
+
+ state = module.params['state']
+ compose = module.params['compose']
+ name = module.params['name']
+ absent_retries = module.params['absent_retries']
+ absent_retries_interval = module.params['absent_retries_interval']
+
+ if state == 'present':
+ if not compose:
+ module.fail_json(msg=("compose parameter must be a list "
+ "containing at least one element"))
+
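+        # Inline dictionaries are dumped to temporary YAML files (registered
+        # for cleanup), while strings are treated as paths to compose files.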
+ compose_files = []
+ for i, compose_def in enumerate(compose):
+ if isinstance(compose_def, dict):
+ compose_file_fd, compose_file = tempfile.mkstemp()
+ module.add_cleanup_file(compose_file)
+ with os.fdopen(compose_file_fd, 'w') as stack_file:
+ compose_files.append(compose_file)
+ stack_file.write(yaml_dump(compose_def))
+ elif isinstance(compose_def, string_types):
+ compose_files.append(compose_def)
+ else:
+ module.fail_json(msg="compose element '%s' must be a string or a dictionary" % compose_def)
+
+ before_stack_services = docker_stack_inspect(module, name)
+
+ rc, out, err = docker_stack_deploy(module, name, compose_files)
+
+ after_stack_services = docker_stack_inspect(module, name)
+
+ if rc != 0:
+            module.fail_json(msg="'docker stack deploy' command failed",
+ rc=rc,
+ stdout=out, stderr=err)
+
+ before_after_differences = json_diff(before_stack_services,
+ after_stack_services)
+ for k in before_after_differences.keys():
+ if isinstance(before_after_differences[k], dict):
+ before_after_differences[k].pop('UpdatedAt', None)
+ before_after_differences[k].pop('Version', None)
+ if not list(before_after_differences[k].keys()):
+ before_after_differences.pop(k)
+
+ if not before_after_differences:
+ module.exit_json(
+ changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err)
+ else:
+ module.exit_json(
+ changed=True,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ stack_spec_diff=json_diff(before_stack_services,
+ after_stack_services,
+ dump=True))
+
+ else:
+ if docker_stack_services(module, name):
+ rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
+ if rc != 0:
+                module.fail_json(msg="'docker stack rm' command failed",
+ rc=rc,
+ stdout=out, stderr=err)
+ else:
+ module.exit_json(changed=True,
+ msg=out, rc=rc,
+ stdout=out, stderr=err)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_stack_info.py b/ansible_collections/community/docker/plugins/modules/docker_stack_info.py
new file mode 100644
index 000000000..bf3bfbdbe
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_stack_info.py
@@ -0,0 +1,88 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information on a docker stack
+description:
+ - Retrieve information on docker stacks using the C(docker stack) command
+ on the target node (see examples).
+extends_documentation_fragment:
+ - community.docker.attributes
+ - community.docker.attributes.info_module
+'''
+
+RETURN = '''
+results:
+ description: |
+ List of dictionaries containing the list of stacks or tasks associated
+ to a stack name.
+ sample:
+ - {"name":"grafana","namespace":"default","orchestrator":"Kubernetes","services":"2"}
+ returned: always
+ type: list
+ elements: dict
+'''
+
+EXAMPLES = '''
+  - name: Show stack info
+ community.docker.docker_stack_info:
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
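+
+  # Illustrative sketch, not part of the original examples: extract a single
+  # field per stack; the lowercase key follows the sample in RETURN above.
+  - name: Show only the stack names
+    ansible.builtin.debug:
+      msg: "{{ result.results | map(attribute='name') | list }}"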
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
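+# List stacks via 'docker stack ls'; --format={{json .}} emits one JSON
+# object per line, which main() parses below.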
+def docker_stack_list(module):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ls", "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ },
+ supports_check_mode=True
+ )
+
+ rc, out, err = docker_stack_list(module)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
+            ret = [json.loads(outitem) for outitem in out.splitlines()]
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py b/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py
new file mode 100644
index 000000000..e3693bc54
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py
@@ -0,0 +1,98 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_task_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information on the tasks of a docker stack
+description:
+  - Retrieve information on the tasks of a docker stack using the C(docker stack)
+    command on the target node (see examples).
+extends_documentation_fragment:
+ - community.docker.attributes
+ - community.docker.attributes.info_module
+options:
+ name:
+ description:
+ - Stack name.
+ type: str
+ required: true
+'''
+
+RETURN = '''
+results:
+ description: |
+ List of dictionaries containing the list of tasks associated
+ to a stack name.
+ sample: >
+ [{"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""}]
+ returned: always
+ type: list
+ elements: dict
+'''
+
+EXAMPLES = '''
+  - name: Show stack tasks info
+ community.docker.docker_stack_task_info:
+ name: test_stack
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
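+# List the tasks of a stack via 'docker stack ps'; --format={{json .}} emits
+# one JSON object per line, which main() parses below.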
+def docker_stack_task(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ps", stack_name, "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True)
+ },
+ supports_check_mode=True
+ )
+
+ name = module.params['name']
+
+ rc, out, err = docker_stack_task(module, name)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
+            ret = [json.loads(outitem) for outitem in out.splitlines()]
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm.py b/ansible_collections/community/docker/plugins/modules/docker_swarm.py
new file mode 100644
index 000000000..69b88f583
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_swarm.py
@@ -0,0 +1,728 @@
+#!/usr/bin/python
+
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm
+short_description: Manage Swarm cluster
+description:
+ - Create a new Swarm cluster.
+ - Add/Remove nodes or managers to an existing cluster.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ advertise_addr:
+ description:
+ - Externally reachable address advertised to other nodes.
+ - This can either be an address/port combination
+ in the form C(192.168.1.1:4567), or an interface followed by a
+ port number, like C(eth0:4567).
+ - If the port number is omitted,
+ the port number from the listen address is used.
+ - If I(advertise_addr) is not specified, it will be automatically
+ detected when possible.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default_addr_pool:
+ description:
+ - Default address pool in CIDR format.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: list
+ elements: str
+ subnet_size:
+ description:
+ - Default address pool subnet mask length.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: int
+ listen_addr:
+ description:
+ - Listen address used for inter-manager communication.
+ - This can either be an address/port combination in the form
+ C(192.168.1.1:4567), or an interface followed by a port number,
+ like C(eth0:4567).
+ - If the port number is omitted, the default swarm listening port
+ is used.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default: 0.0.0.0:2377
+ force:
+ description:
+ - Use with state C(present) to force creating a new Swarm, even if already part of one.
+      - Use with state C(absent) to leave the swarm even if this node is a manager.
+ type: bool
+ default: false
+ state:
+ description:
+ - Set to C(present), to create/update a new cluster.
+ - Set to C(join), to join an existing cluster.
+ - Set to C(absent), to leave an existing cluster.
+ - Set to C(remove), to remove an absent node from the cluster.
+ Note that removing requires Docker SDK for Python >= 2.4.0.
+ - M(community.docker.docker_node) can be used to demote a manager before removal.
+ type: str
+ default: present
+ choices:
+ - present
+ - join
+ - absent
+ - remove
+ node_id:
+ description:
+ - Swarm id of the node to remove.
+ - Used with I(state=remove).
+ type: str
+ join_token:
+ description:
+ - Swarm token used to join a swarm cluster.
+ - Used with I(state=join).
+ - If this value is specified, the corresponding value in the return values will be censored by Ansible.
+ This is a side-effect of this value not being logged.
+ type: str
+ remote_addrs:
+ description:
+ - Remote address of one or more manager nodes of an existing Swarm to connect to.
+ - Used with I(state=join).
+ type: list
+ elements: str
+ task_history_retention_limit:
+ description:
+ - Maximum number of tasks history stored.
+ - Docker default value is C(5).
+ type: int
+ snapshot_interval:
+ description:
+ - Number of logs entries between snapshot.
+ - Docker default value is C(10000).
+ type: int
+ keep_old_snapshots:
+ description:
+ - Number of snapshots to keep beyond the current snapshot.
+ - Docker default value is C(0).
+ type: int
+ log_entries_for_slow_followers:
+ description:
+ - Number of log entries to keep around to sync up slow followers after a snapshot is created.
+ type: int
+ heartbeat_tick:
+ description:
+ - Amount of ticks (in seconds) between each heartbeat.
+      - Docker default value is C(1) second.
+ type: int
+ election_tick:
+ description:
+ - Amount of ticks (in seconds) needed without a leader to trigger a new election.
+ - Docker default value is C(10) seconds.
+ type: int
+ dispatcher_heartbeat_period:
+ description:
+ - The delay (in nanoseconds) for an agent to send a heartbeat to the dispatcher.
+ - Docker default value is 5 seconds, which corresponds to a value of C(5000000000).
+ # DefaultHeartBeatPeriod in https://github.com/moby/moby/blob/master/vendor/github.com/moby/swarmkit/v2/manager/dispatcher/dispatcher.go#L32
+ type: int
+ node_cert_expiry:
+ description:
+ - Automatic expiry for nodes certificates, given in nanoseconds.
+ - Docker default value is 90 days, which corresponds to a value of C(7776000000000000).
+ # DefaultNodeCertExpiration in https://github.com/moby/moby/blob/master/vendor/github.com/moby/swarmkit/v2/ca/certificates.go#L56
+ type: int
+ name:
+ description:
+ - The name of the swarm.
+ type: str
+ labels:
+ description:
+ - User-defined key/value metadata.
+ - Label operations in this module apply to the docker swarm cluster.
+ Use M(community.docker.docker_node) module to add/modify/remove swarm node labels.
+ - Requires API version >= 1.32.
+ type: dict
+ signing_ca_cert:
+ description:
+ - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a certificate, but the contents of the certificate.
+ - Requires API version >= 1.30.
+ type: str
+ signing_ca_key:
+ description:
+ - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a key, but the contents of the key.
+ - Requires API version >= 1.30.
+ type: str
+ ca_force_rotate:
+ description:
+ - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
+ if none have been specified.
+ - Docker default value is C(0).
+ - Requires API version >= 1.30.
+ type: int
+ autolock_managers:
+ description:
+ - If set, generate a key and use it to lock data stored on the managers.
+ - Docker default value is C(false).
+ - M(community.docker.docker_swarm_info) can be used to retrieve the unlock key.
+ type: bool
+ rotate_worker_token:
+ description: Rotate the worker join token.
+ type: bool
+ default: false
+ rotate_manager_token:
+ description: Rotate the manager join token.
+ type: bool
+ default: false
+ data_path_addr:
+ description:
+ - Address or interface to use for data path traffic.
+ - This can either be an address in the form C(192.168.1.1), or an interface,
+ like C(eth0).
+ - Only used when swarm is initialised or joined. Because of this it is not
+ considered for idempotency checking.
+ type: str
+ version_added: 2.5.0
+ data_path_port:
+ description:
+ - Port to use for data path traffic.
+ - This needs to be a port number like C(9789).
+ - Only used when swarm is initialised. Because of this it is not
+ considered for idempotency checking.
+ type: int
+ version_added: 3.1.0
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0"
+ - Docker API >= 1.25
+author:
+ - Thierry Bouvet (@tbouvet)
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+'''
+
+EXAMPLES = '''
+
+- name: Init a new swarm with default parameters
+ community.docker.docker_swarm:
+ state: present
+
+- name: Update swarm configuration
+ community.docker.docker_swarm:
+ state: present
+ election_tick: 5
+
+- name: Add nodes
+ community.docker.docker_swarm:
+ state: join
+ advertise_addr: 192.168.1.2
+ join_token: SWMTKN-1--xxxxx
+ remote_addrs: [ '192.168.1.1:2377' ]
+
+- name: Leave swarm for a node
+ community.docker.docker_swarm:
+ state: absent
+
+- name: Remove a swarm manager
+ community.docker.docker_swarm:
+ state: absent
+ force: true
+
+- name: Remove node from swarm
+ community.docker.docker_swarm:
+ state: remove
+ node_id: mynode
+
+- name: Init a new swarm with a different data path interface
+ community.docker.docker_swarm:
+ state: present
+ advertise_addr: eth0
+ data_path_addr: ens10
+
+- name: Init a new swarm with a different data path port
+ community.docker.docker_swarm:
+ state: present
+ data_path_port: 9789
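+
+# Illustrative sketch, not part of the original examples: enable manager
+# auto-locking and rotate the worker join token on an existing swarm.
+- name: Enable autolock and rotate the worker join token
+  community.docker.docker_swarm:
+    state: present
+    autolock_managers: true
+    rotate_worker_token: true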
+'''
+
+RETURN = '''
+swarm_facts:
+  description: Information about the swarm.
+ returned: success
+ type: dict
+ contains:
+ JoinTokens:
+ description: Tokens to connect to the Swarm.
+ returned: success
+ type: dict
+ contains:
+ Worker:
+ description:
+ - Token to join the cluster as a new *worker* node.
+ - "B(Note:) if this value has been specified as I(join_token), the value here will not
+ be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). If you pass I(join_token),
+ make sure your playbook/role does not depend on this return value!"
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ Manager:
+ description:
+ - Token to join the cluster as a new *manager* node.
+ - "B(Note:) if this value has been specified as I(join_token), the value here will not
+ be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). If you pass I(join_token),
+ make sure your playbook/role does not depend on this return value!"
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ UnlockKey:
+ description: The swarm unlock-key if I(autolock_managers) is C(true).
+ returned: on success if I(autolock_managers) is C(true)
+ and swarm is initialised, or if I(autolock_managers) has changed.
+ type: str
+ example: SWMKEY-1-xxx
+
+actions:
+ description: Provides the actions done on the swarm.
+ returned: when action failed.
+ type: list
+ elements: str
+ example: ['This cluster is already a swarm cluster']
+
+'''
+
+import json
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ DockerBaseClass,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DifferenceTracker,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+from ansible.module_utils.common.text.converters import to_native
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self):
+ super(TaskParameters, self).__init__()
+
+ self.advertise_addr = None
+ self.listen_addr = None
+ self.remote_addrs = None
+ self.join_token = None
+ self.data_path_addr = None
+ self.data_path_port = None
+
+ # Spec
+ self.snapshot_interval = None
+ self.task_history_retention_limit = None
+ self.keep_old_snapshots = None
+ self.log_entries_for_slow_followers = None
+ self.heartbeat_tick = None
+ self.election_tick = None
+ self.dispatcher_heartbeat_period = None
+ self.node_cert_expiry = None
+ self.name = None
+ self.labels = None
+ self.log_driver = None
+ self.signing_ca_cert = None
+ self.signing_ca_key = None
+ self.ca_force_rotate = None
+ self.autolock_managers = None
+ self.rotate_worker_token = None
+ self.rotate_manager_token = None
+ self.default_addr_pool = None
+ self.subnet_size = None
+
+ @staticmethod
+ def from_ansible_params(client):
+ result = TaskParameters()
+ for key, value in client.module.params.items():
+ if key in result.__dict__:
+ setattr(result, key, value)
+
+ result.update_parameters(client)
+ return result
+
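+    # Fill parameters the user left unset from the live swarm spec, so that
+    # unspecified options keep their current values during comparison.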
+ def update_from_swarm_info(self, swarm_info):
+ spec = swarm_info['Spec']
+
+ ca_config = spec.get('CAConfig') or dict()
+ if self.node_cert_expiry is None:
+ self.node_cert_expiry = ca_config.get('NodeCertExpiry')
+ if self.ca_force_rotate is None:
+ self.ca_force_rotate = ca_config.get('ForceRotate')
+
+ dispatcher = spec.get('Dispatcher') or dict()
+ if self.dispatcher_heartbeat_period is None:
+ self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
+
+ raft = spec.get('Raft') or dict()
+ if self.snapshot_interval is None:
+ self.snapshot_interval = raft.get('SnapshotInterval')
+ if self.keep_old_snapshots is None:
+ self.keep_old_snapshots = raft.get('KeepOldSnapshots')
+ if self.heartbeat_tick is None:
+ self.heartbeat_tick = raft.get('HeartbeatTick')
+ if self.log_entries_for_slow_followers is None:
+ self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
+ if self.election_tick is None:
+ self.election_tick = raft.get('ElectionTick')
+
+ orchestration = spec.get('Orchestration') or dict()
+ if self.task_history_retention_limit is None:
+ self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
+
+ encryption_config = spec.get('EncryptionConfig') or dict()
+ if self.autolock_managers is None:
+ self.autolock_managers = encryption_config.get('AutoLockManagers')
+
+ if self.name is None:
+ self.name = spec['Name']
+
+ if self.labels is None:
+ self.labels = spec.get('Labels') or {}
+
+ if 'LogDriver' in spec['TaskDefaults']:
+ self.log_driver = spec['TaskDefaults']['LogDriver']
+
+ def update_parameters(self, client):
+ assign = dict(
+ snapshot_interval='snapshot_interval',
+ task_history_retention_limit='task_history_retention_limit',
+ keep_old_snapshots='keep_old_snapshots',
+ log_entries_for_slow_followers='log_entries_for_slow_followers',
+ heartbeat_tick='heartbeat_tick',
+ election_tick='election_tick',
+ dispatcher_heartbeat_period='dispatcher_heartbeat_period',
+ node_cert_expiry='node_cert_expiry',
+ name='name',
+ labels='labels',
+ signing_ca_cert='signing_ca_cert',
+ signing_ca_key='signing_ca_key',
+ ca_force_rotate='ca_force_rotate',
+ autolock_managers='autolock_managers',
+ log_driver='log_driver',
+ )
+ params = dict()
+ for dest, source in assign.items():
+ if not client.option_minimal_versions[source]['supported']:
+ continue
+ value = getattr(self, source)
+ if value is not None:
+ params[dest] = value
+ self.spec = client.create_swarm_spec(**params)
+
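+    # Compare the requested parameters against the active configuration,
+    # skipping init/join-only options that are documented as not being
+    # considered for idempotency checking.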
+ def compare_to_active(self, other, client, differences):
+ for k in self.__dict__:
+ if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
+ 'rotate_worker_token', 'rotate_manager_token', 'spec',
+ 'default_addr_pool', 'subnet_size', 'data_path_addr',
+ 'data_path_port'):
+ continue
+ if not client.option_minimal_versions[k]['supported']:
+ continue
+ value = getattr(self, k)
+ if value is None:
+ continue
+ other_value = getattr(other, k)
+ if value != other_value:
+ differences.add(k, parameter=value, active=other_value)
+ if self.rotate_worker_token:
+ differences.add('rotate_worker_token', parameter=True, active=False)
+ if self.rotate_manager_token:
+ differences.add('rotate_manager_token', parameter=True, active=False)
+ return differences
+
+
+class SwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+ self.swarm_info = {}
+
+ self.state = client.module.params['state']
+ self.force = client.module.params['force']
+ self.node_id = client.module.params['node_id']
+
+ self.differences = DifferenceTracker()
+ self.parameters = TaskParameters.from_ansible_params(client)
+
+ self.created = False
+
+ def __call__(self):
+ choice_map = {
+ "present": self.init_swarm,
+ "join": self.join,
+ "absent": self.leave,
+ "remove": self.remove,
+ }
+
+ choice_map.get(self.state)()
+
+ if self.client.module._diff or self.parameters.debug:
+ diff = dict()
+ diff['before'], diff['after'] = self.differences.get_before_after()
+ self.results['diff'] = diff
+
+ def inspect_swarm(self):
+ try:
+ data = self.client.inspect_swarm()
+ json_str = json.dumps(data, ensure_ascii=False)
+ self.swarm_info = json.loads(json_str)
+
+ self.results['changed'] = False
+ self.results['swarm_facts'] = self.swarm_info
+
+ unlock_key = self.get_unlock_key()
+ self.swarm_info.update(unlock_key)
+ except APIError:
+ return
+
+ def get_unlock_key(self):
+ default = {'UnlockKey': None}
+ if not self.has_swarm_lock_changed():
+ return default
+ try:
+ return self.client.get_unlock_key() or default
+ except APIError:
+ return default
+
+ def has_swarm_lock_changed(self):
+ return self.parameters.autolock_managers and (
+ self.created or self.differences.has_difference_for('autolock_managers')
+ )
+
+ def init_swarm(self):
+ if not self.force and self.client.check_if_swarm_manager():
+ self.__update_swarm()
+ return
+
+ if not self.check_mode:
+ init_arguments = {
+ 'advertise_addr': self.parameters.advertise_addr,
+ 'listen_addr': self.parameters.listen_addr,
+ 'data_path_addr': self.parameters.data_path_addr,
+ 'force_new_cluster': self.force,
+ 'swarm_spec': self.parameters.spec,
+ }
+ if self.parameters.default_addr_pool is not None:
+ init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
+ if self.parameters.subnet_size is not None:
+ init_arguments['subnet_size'] = self.parameters.subnet_size
+ if self.parameters.data_path_port is not None:
+ init_arguments['data_path_port'] = self.parameters.data_path_port
+ try:
+ self.client.init_swarm(**init_arguments)
+ except APIError as exc:
+ self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
+
+ if not self.client.check_if_swarm_manager():
+ if not self.check_mode:
+ self.client.fail("Swarm not created or other error!")
+
+ self.created = True
+ self.inspect_swarm()
+ self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
+ self.differences.add('state', parameter='present', active='absent')
+ self.results['changed'] = True
+ self.results['swarm_facts'] = {
+ 'JoinTokens': self.swarm_info.get('JoinTokens'),
+ 'UnlockKey': self.swarm_info.get('UnlockKey')
+ }
+
+ def __update_swarm(self):
+ try:
+ self.inspect_swarm()
+ version = self.swarm_info['Version']['Index']
+ self.parameters.update_from_swarm_info(self.swarm_info)
+ old_parameters = TaskParameters()
+ old_parameters.update_from_swarm_info(self.swarm_info)
+ self.parameters.compare_to_active(old_parameters, self.client, self.differences)
+ if self.differences.empty:
+ self.results['actions'].append("No modification")
+ self.results['changed'] = False
+ return
+ update_parameters = TaskParameters.from_ansible_params(self.client)
+ update_parameters.update_parameters(self.client)
+ if not self.check_mode:
+ self.client.update_swarm(
+ version=version, swarm_spec=update_parameters.spec,
+ rotate_worker_token=self.parameters.rotate_worker_token,
+ rotate_manager_token=self.parameters.rotate_manager_token)
+ except APIError as exc:
+ self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
+ return
+
+ self.inspect_swarm()
+ self.results['actions'].append("Swarm cluster updated")
+ self.results['changed'] = True
+
+ def join(self):
+ if self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is already part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.join_swarm(
+ remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
+ listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr,
+ data_path_addr=self.parameters.data_path_addr)
+ except APIError as exc:
+ self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("New node is added to swarm cluster")
+ self.differences.add('joined', parameter=True, active=False)
+ self.results['changed'] = True
+
+ def leave(self):
+ if not self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is not part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.leave_swarm(force=self.force)
+ except APIError as exc:
+ self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node has left the swarm cluster")
+ self.differences.add('joined', parameter='absent', active='present')
+ self.results['changed'] = True
+
+ def remove(self):
+ if not self.client.check_if_swarm_manager():
+ self.client.fail("This node is not a manager.")
+
+ try:
+ status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
+ except APIError:
+ return
+
+ if not status_down:
+            self.client.fail("Can not remove the node. The node status is ready and not down.")
+
+ if not self.check_mode:
+ try:
+ self.client.remove_node(node_id=self.node_id, force=self.force)
+ except APIError as exc:
+ self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node is removed from swarm cluster.")
+ self.differences.add('joined', parameter=False, active=True)
+ self.results['changed'] = True
+
+
+def _detect_remove_operation(client):
+ return client.module.params['state'] == 'remove'
+
+
+def main():
+ argument_spec = dict(
+ advertise_addr=dict(type='str'),
+ data_path_addr=dict(type='str'),
+ data_path_port=dict(type='int'),
+ state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove']),
+ force=dict(type='bool', default=False),
+ listen_addr=dict(type='str', default='0.0.0.0:2377'),
+ remote_addrs=dict(type='list', elements='str'),
+ join_token=dict(type='str', no_log=True),
+ snapshot_interval=dict(type='int'),
+ task_history_retention_limit=dict(type='int'),
+ keep_old_snapshots=dict(type='int'),
+ log_entries_for_slow_followers=dict(type='int'),
+ heartbeat_tick=dict(type='int'),
+ election_tick=dict(type='int'),
+ dispatcher_heartbeat_period=dict(type='int'),
+ node_cert_expiry=dict(type='int'),
+ name=dict(type='str'),
+ labels=dict(type='dict'),
+ signing_ca_cert=dict(type='str'),
+ signing_ca_key=dict(type='str', no_log=True),
+ ca_force_rotate=dict(type='int'),
+ autolock_managers=dict(type='bool'),
+ node_id=dict(type='str'),
+ rotate_worker_token=dict(type='bool', default=False),
+ rotate_manager_token=dict(type='bool', default=False),
+ default_addr_pool=dict(type='list', elements='str'),
+ subnet_size=dict(type='int'),
+ )
+
+ required_if = [
+ ('state', 'join', ['remote_addrs', 'join_token']),
+ ('state', 'remove', ['node_id'])
+ ]
+
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
+ signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ autolock_managers=dict(docker_py_version='2.6.0'),
+ log_driver=dict(docker_py_version='2.6.0'),
+ remove_operation=dict(
+ docker_py_version='2.4.0',
+ detect_usage=_detect_remove_operation,
+ usage_msg='remove swarm nodes'
+ ),
+ default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ data_path_addr=dict(docker_py_version='4.0.0', docker_api_version='1.30'),
+ data_path_port=dict(docker_py_version='6.0.0', docker_api_version='1.40'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='1.10.0',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ result='',
+ actions=[]
+ )
+
+ SwarmManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py b/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py
new file mode 100644
index 000000000..df1e5af95
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py
@@ -0,0 +1,388 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_info
+
+short_description: Retrieves facts about Docker Swarm cluster.
+
+description:
+ - Retrieves facts about a Docker Swarm.
+  - Returns lists of names of the swarm objects - nodes, services, and tasks.
+  - The output differs depending on the API version available on the docker host.
+  - Must be run on a Swarm Manager node; otherwise the module fails with an error message.
+    It does return boolean flags on both error and success which indicate whether
+    the docker daemon can be communicated with, whether it is in Swarm mode, and
+    whether it is a Swarm Manager node.
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ nodes:
+ description:
+ - Whether to list swarm nodes.
+ type: bool
+ default: false
+ nodes_filters:
+ description:
+ - A dictionary of filter values used for selecting nodes to list.
+ - "For example, C(name: mynode)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ services:
+ description:
+ - Whether to list swarm services.
+ type: bool
+ default: false
+ services_filters:
+ description:
+ - A dictionary of filter values used for selecting services to list.
+ - "For example, C(name: myservice)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ tasks:
+ description:
+      - Whether to list the swarm tasks.
+ type: bool
+ default: false
+ tasks_filters:
+ description:
+ - A dictionary of filter values used for selecting tasks to list.
+ - "For example, C(node: mynode-1)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering)
+ for more information on possible filters.
+ type: dict
+ unlock_key:
+ description:
+ - Whether to retrieve the swarm unlock key.
+ type: bool
+ default: false
+ verbose_output:
+ description:
+      - When set to C(true) and I(nodes), I(services), or I(tasks) is set to C(true), the module output will
+        contain verbose information about the objects, matching the full output of the API method.
+      - For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/).
+      - The verbose output in this module contains only a subset of the information returned by the I(_info) module
+        for each type of the objects.
+ type: bool
+ default: false
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0"
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get info on Docker Swarm
+ community.docker.docker_swarm_info:
+ ignore_errors: true
+ register: result
+
+- name: Inform about basic flags
+ ansible.builtin.debug:
+ msg: |
+ Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
+ Docker in Swarm mode: {{ result.docker_swarm_active }}
+ This is a Manager node: {{ result.docker_swarm_manager }}
+
+- name: Get info on Docker Swarm and list of registered nodes
+ community.docker.docker_swarm_info:
+ nodes: true
+ register: result
+
+- name: Get info on Docker Swarm and extended list of registered nodes
+ community.docker.docker_swarm_info:
+ nodes: true
+ verbose_output: true
+ register: result
+
+- name: Get info on Docker Swarm and filtered list of registered nodes
+ community.docker.docker_swarm_info:
+ nodes: true
+ nodes_filters:
+ name: mynode
+ register: result
+
+- name: Show swarm facts
+ ansible.builtin.debug:
+ var: result.swarm_facts
+
+- name: Get the swarm unlock key
+ community.docker.docker_swarm_info:
+ unlock_key: true
+ register: result
+
+- name: Print swarm unlock key
+ ansible.builtin.debug:
+ var: result.swarm_unlock_key
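+
+# Illustrative sketch, not part of the original examples: list only the tasks
+# scheduled on one node, using the documented tasks_filters option.
+- name: Get tasks running on a specific node
+  community.docker.docker_swarm_info:
+    tasks: true
+    tasks_filters:
+      node: mynode-1
+  register: result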
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+docker_swarm_active:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ and the docker daemon is in Swarm mode.
+ returned: both on success and on error
+ type: bool
+docker_swarm_manager:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ the docker daemon is in Swarm mode, and the current node is
+ a manager node.
+    - The module will fail unless this value is C(true).
+ returned: both on success and on error
+ type: bool
+swarm_facts:
+ description:
+ - Facts representing the basic state of the docker Swarm cluster.
+    - Contains tokens to connect to the Swarm.
+ returned: always
+ type: dict
+swarm_unlock_key:
+ description:
+ - Contains the key needed to unlock the swarm.
+ returned: When I(unlock_key) is C(true).
+ type: str
+nodes:
+ description:
+    - List of dict objects containing the basic information about each node.
+      Keys match the C(docker node ls) output unless I(verbose_output=true).
+      See description for I(verbose_output).
+ returned: When I(nodes) is C(true)
+ type: list
+ elements: dict
+services:
+ description:
+    - List of dict objects containing the basic information about each service.
+      Keys match the C(docker service ls) output unless I(verbose_output=true).
+      See description for I(verbose_output).
+ returned: When I(services) is C(true)
+ type: list
+ elements: dict
+tasks:
+ description:
+    - List of dict objects containing the basic information about each task.
+      Keys match the C(docker service ps) output unless I(verbose_output=true).
+      See description for I(verbose_output).
+ returned: When I(tasks) is C(true)
+ type: list
+ elements: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker_common
+ pass
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+from ansible_collections.community.docker.plugins.module_utils.common import RequestException
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ clean_dict_booleans_for_docker_api,
+)
+
+
+class DockerSwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerSwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['tasks', 'services', 'nodes']
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.results['swarm_facts'] = self.get_docker_swarm_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+ if self.client.module.params['unlock_key']:
+ self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key()
+
+ def get_docker_swarm_facts(self):
+ try:
+ return self.client.inspect_swarm()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))
+
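+    # Fetch the raw objects from the API; return them verbatim when
+    # verbose_output is set, otherwise reduce each to the essential fields.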
+ def get_docker_items_list(self, docker_object=None, filters=None):
+ items = None
+ items_list = []
+
+ try:
+ if docker_object == 'nodes':
+ items = self.client.nodes(filters=filters)
+ elif docker_object == 'tasks':
+ items = self.client.tasks(filters=filters)
+ elif docker_object == 'services':
+ items = self.client.services(filters=filters)
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm for object '%s': %s" %
+ (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ return items
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'nodes':
+ item_record = self.get_essential_facts_nodes(item)
+ elif docker_object == 'tasks':
+ item_record = self.get_essential_facts_tasks(item)
+ elif docker_object == 'services':
+ item_record = self.get_essential_facts_services(item)
+ if item_record['Mode'] == 'Global':
+ item_record['Replicas'] = len(items)
+ items_list.append(item_record)
+
+ return items_list
+
+ @staticmethod
+ def get_essential_facts_nodes(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item.get('ID')
+ object_essentials['Hostname'] = item['Description']['Hostname']
+ object_essentials['Status'] = item['Status']['State']
+ object_essentials['Availability'] = item['Spec']['Availability']
+ if 'ManagerStatus' in item:
+ object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability']
+ if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True:
+ object_essentials['ManagerStatus'] = "Leader"
+ else:
+ object_essentials['ManagerStatus'] = None
+ object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion']
+
+ return object_essentials
+
+ def get_essential_facts_tasks(self, item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ # Returning container ID to not trigger another connection to host
+ # Container ID is sufficient to get extended info in other tasks
+ object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID']
+ object_essentials['Image'] = item['Spec']['ContainerSpec']['Image']
+ object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID'])
+ object_essentials['DesiredState'] = item['DesiredState']
+ object_essentials['CurrentState'] = item['Status']['State']
+ if 'Err' in item['Status']:
+ object_essentials['Error'] = item['Status']['Err']
+ else:
+ object_essentials['Error'] = None
+
+ return object_essentials
+
+ @staticmethod
+ def get_essential_facts_services(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ object_essentials['Name'] = item['Spec']['Name']
+ if 'Replicated' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Replicated"
+ object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas']
+ elif 'Global' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Global"
+            # Number of replicas has to be updated in the calling method or may be left as None
+ object_essentials['Replicas'] = None
+ object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image']
+ if item['Spec'].get('EndpointSpec') and 'Ports' in item['Spec']['EndpointSpec']:
+ object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports']
+ else:
+ object_essentials['Ports'] = []
+
+ return object_essentials
+
+ def get_docker_swarm_unlock_key(self):
+ unlock_key = self.client.get_unlock_key() or {}
+ return unlock_key.get('UnlockKey') or None
+
+
+def main():
+ argument_spec = dict(
+ nodes=dict(type='bool', default=False),
+ nodes_filters=dict(type='dict'),
+ tasks=dict(type='bool', default=False),
+ tasks_filters=dict(type='dict'),
+ services=dict(type='bool', default=False),
+ services_filters=dict(type='dict'),
+ unlock_key=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+ option_minimal_versions = dict(
+ unlock_key=dict(docker_py_version='2.7.0'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ option_minimal_versions=option_minimal_versions,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ docker_swarm_active=False,
+ docker_swarm_manager=False,
+ ),
+ )
+ client.fail_results['can_talk_to_docker'] = True
+ client.fail_results['docker_swarm_active'] = client.check_if_swarm_node()
+ client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager()
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerSwarmManager(client, results)
+ results.update(client.fail_results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py b/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py
new file mode 100644
index 000000000..564234cb5
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py
@@ -0,0 +1,2866 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2017, Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service
+author:
+ - "Dario Zanzico (@dariko)"
+ - "Jason Witkowski (@jwitko)"
+ - "Hannes Ljungberg (@hannseman)"
+ - "Piotr Wojciechowski (@wojciechowskipiotr)"
+short_description: docker swarm service
+description:
+ - Manages docker services via a swarm manager node.
+  - This module does not support updating services in a stack.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_2_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ args:
+ description:
+      - List of arguments to be passed to the container.
+ - Corresponds to the C(ARG) parameter of C(docker service create).
+ type: list
+ elements: str
+ command:
+ description:
+ - Command to execute when the container starts.
+      - A command may be either a string or a list of strings.
+ - Corresponds to the C(COMMAND) parameter of C(docker service create).
+ type: raw
+ configs:
+ description:
+ - List of dictionaries describing the service configs.
+ - Corresponds to the C(--config) option of C(docker service create).
+ - Requires API version >= 1.30.
+ type: list
+ elements: dict
+ suboptions:
+ config_id:
+ description:
+ - Config's ID.
+ type: str
+ config_name:
+ description:
+ - Config's name as defined at its creation.
+ type: str
+ required: true
+ filename:
+ description:
+ - Name of the file containing the config. Defaults to the I(config_name) if not specified.
+ type: str
+ uid:
+ description:
+ - UID of the config file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the config file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ container_labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--container-label) option of C(docker service create).
+ type: dict
+ dns:
+ description:
+ - List of custom DNS servers.
+ - Corresponds to the C(--dns) option of C(docker service create).
+ type: list
+ elements: str
+ dns_search:
+ description:
+ - List of custom DNS search domains.
+ - Corresponds to the C(--dns-search) option of C(docker service create).
+ type: list
+ elements: str
+ dns_options:
+ description:
+ - List of custom DNS options.
+ - Corresponds to the C(--dns-option) option of C(docker service create).
+ type: list
+ elements: str
+ endpoint_mode:
+ description:
+ - Service endpoint mode.
+ - Corresponds to the C(--endpoint-mode) option of C(docker service create).
+ type: str
+ choices:
+ - vip
+ - dnsrr
+ env:
+ description:
+ - List or dictionary of the service environment variables.
+      - If passed a list, each item needs to be in the format of C(KEY=VALUE).
+      - If passed a dictionary, values which might be parsed as numbers,
+        booleans or other types by the YAML parser must be quoted (for example C("true"))
+        in order to avoid data loss.
+ - Corresponds to the C(--env) option of C(docker service create).
+ type: raw
+ env_files:
+ description:
+ - List of paths to files, present on the target, containing environment variables C(FOO=BAR).
+ - The order of the list is significant in determining the value assigned to a
+ variable that shows up more than once.
+      - If a variable is also present in I(env), then the I(env) value will override.
+ type: list
+ elements: path
+ force_update:
+ description:
+ - Force update even if no changes require it.
+ - Corresponds to the C(--force) option of C(docker service update).
+ type: bool
+ default: false
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ - Corresponds to the C(--group) option of C(docker service update).
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work.
+      - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept a duration as a string in a format
+        that looks like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ type: str
+ retries:
+ description:
+          - Consecutive failures needed to report unhealthy. It accepts an integer value.
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ type: str
+ hostname:
+ description:
+ - Container hostname.
+ - Corresponds to the C(--hostname) option of C(docker service create).
+ type: str
+ hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's /etc/hosts file.
+ - Corresponds to the C(--host) option of C(docker service create).
+ type: dict
+ image:
+ description:
+ - Service image path and tag.
+ - Corresponds to the C(IMAGE) parameter of C(docker service create).
+ type: str
+ init:
+ description:
+ - Use an init inside each service container to forward signals and reap processes.
+ - Corresponds to the C(--init) option of C(docker service create).
+ - Requires API version >= 1.37.
+ type: bool
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--label) option of C(docker service create).
+ type: dict
+ limits:
+ description:
+ - Configures service resource limits.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU limit. C(0) equals no limit.
+ - Corresponds to the C(--limit-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no limit.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--limit-memory) option of C(docker service create).
+ type: str
+ type: dict
+ logging:
+ description:
+ - "Logging configuration for the service."
+ suboptions:
+ driver:
+ description:
+ - Configure the logging driver for a service.
+ - Corresponds to the C(--log-driver) option of C(docker service create).
+ type: str
+ options:
+ description:
+ - Options for service logging driver.
+ - Corresponds to the C(--log-opt) option of C(docker service create).
+ type: dict
+ type: dict
+ mode:
+ description:
+ - Service replication mode.
+ - Service will be removed and recreated when changed.
+ - Corresponds to the C(--mode) option of C(docker service create).
+ type: str
+ default: replicated
+ choices:
+ - replicated
+ - global
+ mounts:
+ description:
+ - List of dictionaries describing the service mounts.
+ - Corresponds to the C(--mount) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ source:
+ description:
+ - Mount source (for example a volume name or a host path).
+ - Must be specified if I(type) is not C(tmpfs).
+ type: str
+ target:
+ description:
+ - Container path.
+ type: str
+ required: true
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows. Also note that C(npipe) was added in Ansible 2.9.
+ type: str
+ default: bind
+ choices:
+ - bind
+ - volume
+ - tmpfs
+ - npipe
+ readonly:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ labels:
+ description:
+ - Volume labels to apply.
+ type: dict
+ propagation:
+ description:
+ - The propagation mode to use.
+ - Can only be used when I(type) is C(bind).
+ type: str
+ choices:
+ - shared
+ - slave
+ - private
+ - rshared
+ - rslave
+ - rprivate
+ no_copy:
+ description:
+ - Disable copying of data from a container when a volume is created.
+ - Can only be used when I(type) is C(volume).
+ type: bool
+ driver_config:
+ description:
+ - Volume driver configuration.
+ - Can only be used when I(type) is C(volume).
+ suboptions:
+ name:
+ description:
+ - Name of the volume-driver plugin to use for the volume.
+ type: str
+ options:
+ description:
+ - Options as key-value pairs to pass to the driver for this volume.
+ type: dict
+ type: dict
+ tmpfs_size:
+ description:
+ - "Size of the tmpfs mount in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Can only be used when I(type) is C(tmpfs).
+ type: str
+ tmpfs_mode:
+ description:
+ - File mode of the tmpfs in octal.
+ - Can only be used when I(type) is C(tmpfs).
+ type: int
+ name:
+ description:
+ - Service name.
+ - Corresponds to the C(--name) option of C(docker service create).
+ type: str
+ required: true
+ networks:
+ description:
+ - List of the service networks names or dictionaries.
+      - When passed dictionaries, valid sub-options are I(name), which is required, and
+ I(aliases) and I(options).
+ - Prior to API version 1.29, updating and removing networks is not supported.
+ If changes are made the service will then be removed and recreated.
+ - Corresponds to the C(--network) option of C(docker service create).
+ type: list
+ elements: raw
+ placement:
+ description:
+ - Configures service placement preferences and constraints.
+ suboptions:
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ type: list
+ elements: str
+ preferences:
+ description:
+ - List of the placement preferences as key value pairs.
+ - Corresponds to the C(--placement-pref) option of C(docker service create).
+ - Requires API version >= 1.27.
+ type: list
+ elements: dict
+ replicas_max_per_node:
+ description:
+ - Maximum number of tasks per node.
+          - Corresponds to the C(--replicas-max-per-node) option of C(docker service create).
+          - Requires API version >= 1.40.
+ type: int
+ version_added: 1.3.0
+ type: dict
+ publish:
+ description:
+ - List of dictionaries describing the service published ports.
+ - Corresponds to the C(--publish) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ published_port:
+ description:
+ - The port to make externally available.
+ type: int
+ required: false
+ target_port:
+ description:
+ - The port inside the container to expose.
+ type: int
+ required: true
+ protocol:
+ description:
+ - What protocol to use.
+ type: str
+ default: tcp
+ choices:
+ - tcp
+ - udp
+ mode:
+ description:
+ - What publish mode to use.
+ - Requires API version >= 1.32.
+ type: str
+ choices:
+ - ingress
+ - host
+ read_only:
+ description:
+ - Mount the container's root filesystem as read-only.
+ - Corresponds to the C(--read-only) option of C(docker service create).
+ type: bool
+ replicas:
+ description:
+ - Number of containers instantiated in the service. Valid only if I(mode) is C(replicated).
+ - If set to C(-1), and service is not present, service replicas will be set to C(1).
+ - If set to C(-1), and service is present, service replicas will be unchanged.
+ - Corresponds to the C(--replicas) option of C(docker service create).
+ type: int
+ default: -1
+ reservations:
+ description:
+ - Configures service resource reservations.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ type: str
+ type: dict
+ resolve_image:
+ description:
+ - Whether the current image digest should be resolved from the registry, and the service updated if it changed.
+ - Requires API version >= 1.30.
+ type: bool
+ default: false
+ restart_config:
+ description:
+ - Configures if and how to restart containers when they exit.
+ suboptions:
+ condition:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ type: str
+ max_attempts:
+ description:
+ - Maximum number of service restarts.
+ - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ type: int
+ window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ type: str
+ type: dict
+ rollback_config:
+ description:
+ - Configures how the service should be rolled back in case of a failing update.
+ suboptions:
+ parallelism:
+ description:
+ - The number of containers to roll back at a time. If set to 0, all containers roll back simultaneously.
+ - Corresponds to the C(--rollback-parallelism) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: int
+ delay:
+ description:
+ - Delay between task rollbacks.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-delay) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of rollback failure.
+ - Corresponds to the C(--rollback-failure-action) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ choices:
+ - continue
+ - pause
+ monitor:
+ description:
+ - Duration after each task rollback to monitor for failure.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-monitor) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during a rollback.
+ - Corresponds to the C(--rollback-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations during rollbacks.
+ - Corresponds to the C(--rollback-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ secrets:
+ description:
+ - List of dictionaries describing the service secrets.
+ - Corresponds to the C(--secret) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ secret_id:
+ description:
+ - Secret's ID.
+ type: str
+ secret_name:
+ description:
+ - Secret's name as defined at its creation.
+ type: str
+ required: true
+ filename:
+ description:
+ - Name of the file containing the secret. Defaults to the I(secret_name) if not specified.
+ - Corresponds to the C(target) key of C(docker service create --secret).
+ type: str
+ uid:
+ description:
+ - UID of the secret file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the secret file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ state:
+ description:
+ - C(absent) - A service matching the specified name will be removed and have its tasks stopped.
+ - C(present) - Asserts the existence of a service matching the name and provided configuration parameters.
+ Unspecified configuration parameters will be set to Docker defaults.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ stop_grace_period:
+ description:
+ - Time to wait before force killing a container.
+ - "Accepts a duration as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--stop-grace-period) option of C(docker service create).
+ type: str
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ - Corresponds to the C(--stop-signal) option of C(docker service create).
+ type: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - Corresponds to the C(--tty) option of C(docker service create).
+ type: bool
+ update_config:
+ description:
+ - Configures how the service should be updated. Useful for configuring rolling updates.
+ suboptions:
+ parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ type: int
+ delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of I(rollback) requires API version >= 1.29.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ type: float
+ order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ user:
+ description:
+ - Sets the username or UID used for the specified command.
+ - Before Ansible 2.8, the default value for this option was C(root).
+ - The default has been removed so that the user defined in the image is used if no user is specified here.
+ - Corresponds to the C(--user) option of C(docker service create).
+ type: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ - Corresponds to the C(--workdir) option of C(docker service create).
+ type: str
+ cap_add:
+ description:
+ - List of capabilities to add to the container.
+ - Requires API version >= 1.41.
+ type: list
+ elements: str
+ version_added: 2.2.0
+ cap_drop:
+ description:
+ - List of capabilities to drop from the container.
+ - Requires API version >= 1.41.
+ type: list
+ elements: str
+ version_added: 2.2.0
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.2"
+ - "Docker API >= 1.25"
+notes:
+ - "Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0.
+ When using older versions use C(force_update: true) to trigger the swarm to resolve a new image."
+'''
+
+RETURN = '''
+swarm_service:
+ returned: always
+ type: dict
+ description:
+ - Dictionary of variables representing the current state of the service.
+ Matches the module parameters format.
+ - Note that facts are not part of registered vars but accessible directly.
+ - Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service),
+ while the module actually returned a variable called C(ansible_docker_service). The variable
+ was renamed to C(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0.
+ In Ansible 2.7.x, the old name C(ansible_docker_service) can still be used.
+ sample: '{
+ "args": [
+ "3600"
+ ],
+ "cap_add": null,
+ "cap_drop": [
+ "ALL"
+ ],
+ "command": [
+ "sleep"
+ ],
+ "configs": null,
+ "constraints": [
+ "node.role == manager",
+ "engine.labels.operatingsystem == ubuntu 14.04"
+ ],
+ "container_labels": null,
+ "dns": null,
+ "dns_options": null,
+ "dns_search": null,
+ "endpoint_mode": null,
+ "env": [
+ "ENVVAR1=envvar1",
+ "ENVVAR2=envvar2"
+ ],
+ "force_update": null,
+ "groups": null,
+ "healthcheck": {
+ "interval": 90000000000,
+ "retries": 3,
+ "start_period": 30000000000,
+ "test": [
+ "CMD",
+ "curl",
+ "--fail",
+ "http://nginx.host.com"
+ ],
+ "timeout": 10000000000
+ },
+ "healthcheck_disabled": false,
+ "hostname": null,
+ "hosts": null,
+ "image": "alpine:latest@sha256:b3dbf31b77fd99d9c08f780ce6f5282aba076d70a513a8be859d8d3a4d0c92b8",
+ "labels": {
+ "com.example.department": "Finance",
+ "com.example.description": "Accounting webapp"
+ },
+ "limit_cpu": 0.5,
+ "limit_memory": 52428800,
+ "log_driver": "fluentd",
+ "log_driver_options": {
+ "fluentd-address": "127.0.0.1:24224",
+ "fluentd-async-connect": "true",
+ "tag": "myservice"
+ },
+ "mode": "replicated",
+ "mounts": [
+ {
+ "readonly": false,
+ "source": "/tmp/",
+ "target": "/remote_tmp/",
+ "type": "bind",
+ "labels": null,
+ "propagation": null,
+ "no_copy": null,
+ "driver_config": null,
+ "tmpfs_size": null,
+ "tmpfs_mode": null
+ }
+ ],
+ "networks": null,
+ "placement_preferences": [
+ {
+ "spread": "node.labels.mylabel"
+ }
+ ],
+ "publish": null,
+ "read_only": null,
+ "replicas": 1,
+ "replicas_max_per_node": 1,
+ "reserve_cpu": 0.25,
+ "reserve_memory": 20971520,
+ "restart_policy": "on-failure",
+ "restart_policy_attempts": 3,
+ "restart_policy_delay": 5000000000,
+ "restart_policy_window": 120000000000,
+ "secrets": null,
+ "stop_grace_period": null,
+ "stop_signal": null,
+ "tty": null,
+ "update_delay": 10000000000,
+ "update_failure_action": null,
+ "update_max_failure_ratio": null,
+ "update_monitor": null,
+ "update_order": "stop-first",
+ "update_parallelism": 2,
+ "user": null,
+ "working_dir": null
+ }'
+changes:
+ returned: always
+ description:
+ - List of changed service attributes if a service has been altered, [] otherwise.
+ type: list
+ elements: str
+ sample: ['container_labels', 'replicas']
+rebuilt:
+ returned: always
+ description:
+ - True if the service has been recreated (removed and created).
+ type: bool
+ sample: true
+'''
+
+EXAMPLES = '''
+- name: Set command and arguments
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ command: sleep
+ args:
+ - "3600"
+
+- name: Set a bind mount
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ mounts:
+ - source: /tmp/
+ target: /remote_tmp/
+ type: bind
+
+- name: Set service labels
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ labels:
+ com.example.description: "Accounting webapp"
+ com.example.department: "Finance"
+
+- name: Set environment variables
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ env:
+ ENVVAR1: envvar1
+ ENVVAR2: envvar2
+ env_files:
+ - envs/common.env
+ - envs/apps/web.env
+
+- name: Set fluentd logging
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ logging:
+ driver: fluentd
+ options:
+ fluentd-address: "127.0.0.1:24224"
+ fluentd-async-connect: "true"
+ tag: myservice
+
+- name: Set restart policies
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ restart_config:
+ condition: on-failure
+ delay: 5s
+ max_attempts: 3
+ window: 120s
+
+- name: Set update config
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set rollback config
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ failure_action: rollback
+ rollback_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set placement preferences
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ placement:
+ preferences:
+ - spread: node.labels.mylabel
+ constraints:
+ - node.role == manager
+ - engine.labels.operatingsystem == ubuntu 14.04
+ replicas_max_per_node: 2
+
+- name: Set configs
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ configs:
+ - config_name: myconfig_name
+ filename: "/tmp/config.txt"
+
+- name: Set networks
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - mynetwork
+
+- name: Set networks as a dictionary
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - name: "mynetwork"
+ aliases:
+ - "mynetwork_alias"
+ options:
+ foo: bar
+
+- name: Set secrets
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ secrets:
+ - secret_name: mysecret_name
+ filename: "/run/secrets/secret.txt"
+
+- name: Start service with healthcheck
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: nginx:1.13
+ healthcheck:
+ # Check if the nginx server is healthy by curl'ing the server.
+ # If this fails or times out, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Configure service resources
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ reservations:
+ cpus: 0.25
+ memory: 20M
+ limits:
+ cpus: 0.50
+ memory: 50M
+
+- name: Remove service
+ community.docker.docker_swarm_service:
+ name: myservice
+ state: absent
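+
+# An extra, illustrative example based on the publish option documented
+# above; the port numbers and service name are placeholder values.
+- name: Publish a container port
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ publish:
+ - published_port: 8080
+ target_port: 80
+ protocol: tcp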
+'''
+
+import shlex
+import time
+import traceback
+
+from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DifferenceTracker,
+ DockerBaseClass,
+ convert_duration_to_nanosecond,
+ parse_healthcheck,
+ clean_dict_booleans_for_docker_api,
+)
+
+from ansible.module_utils.basic import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils.common.text.converters import to_text, to_native
+
+try:
+ from docker import types
+ from docker.utils import (
+ parse_repository_tag,
+ parse_env_file,
+ format_environment,
+ )
+ from docker.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+ )
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_docker_environment(env, env_files):
+ """
+ Will return a list of "KEY=VALUE" items. Supplied env variable can
+ be either a list or a dictionary.
+
+ If environment files are combined with explicit environment variables,
+ the explicit environment variables take precedence.
+ """
+ env_dict = {}
+ if env_files:
+ for env_file in env_files:
+ parsed_env_file = parse_env_file(env_file)
+ for name, value in parsed_env_file.items():
+ env_dict[name] = str(value)
+ if env is not None and isinstance(env, string_types):
+ env = env.split(',')
+ if env is not None and isinstance(env, dict):
+ for name, value in env.items():
+ if not isinstance(value, string_types):
+ raise ValueError(
+ 'Non-string value found for env option. '
+ 'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s' % name
+ )
+ env_dict[name] = str(value)
+ elif env is not None and isinstance(env, list):
+ for item in env:
+ try:
+ name, value = item.split('=', 1)
+ except ValueError:
+ raise ValueError('Invalid environment variable found in list, needs to be in format KEY=VALUE.')
+ env_dict[name] = value
+ elif env is not None:
+ raise ValueError(
+ 'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env))
+ )
+ env_list = format_environment(env_dict)
+ if not env_list:
+ if env is not None or env_files is not None:
+ return []
+ else:
+ return None
+ return sorted(env_list)
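+
+# Illustrative behaviour of get_docker_environment (not executed by the
+# module); 'app.env' is a hypothetical env file defining FOO=from_file
+# and BAR=baz. Explicit env values take precedence over file values:
+#
+#   get_docker_environment({'FOO': 'bar'}, ['app.env'])
+#   # -> ['BAR=baz', 'FOO=bar']  (sorted KEY=VALUE list)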
+
+
+def get_docker_networks(networks, network_ids):
+ """
+ Validate a list of network names or a list of network dictionaries.
+ Network names will be resolved to ids by using the network_ids mapping.
+ """
+ if networks is None:
+ return None
+ parsed_networks = []
+ for network in networks:
+ if isinstance(network, string_types):
+ parsed_network = {'name': network}
+ elif isinstance(network, dict):
+ if 'name' not in network:
+ raise TypeError(
+ '"name" is required when networks are passed as dictionaries.'
+ )
+ name = network.pop('name')
+ parsed_network = {'name': name}
+ aliases = network.pop('aliases', None)
+ if aliases is not None:
+ if not isinstance(aliases, list):
+ raise TypeError('"aliases" network option is only allowed as a list')
+ if not all(
+ isinstance(alias, string_types) for alias in aliases
+ ):
+ raise TypeError('Only strings are allowed as network aliases.')
+ parsed_network['aliases'] = aliases
+ options = network.pop('options', None)
+ if options is not None:
+ if not isinstance(options, dict):
+ raise TypeError('Only dict is allowed as network options.')
+ parsed_network['options'] = clean_dict_booleans_for_docker_api(options)
+ # Check if any invalid keys left
+ if network:
+ invalid_keys = ', '.join(network.keys())
+ raise TypeError(
+ '%s are not valid keys for the networks option' % invalid_keys
+ )
+
+ else:
+ raise TypeError(
+ 'Only a list of strings or dictionaries is allowed to be passed as networks.'
+ )
+ network_name = parsed_network.pop('name')
+ try:
+ parsed_network['id'] = network_ids[network_name]
+ except KeyError as e:
+ raise ValueError('Could not find a network named: %s.' % e)
+ parsed_networks.append(parsed_network)
+ return parsed_networks or []
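+
+# Illustrative behaviour of get_docker_networks (not executed): network
+# names are replaced by ids from the supplied mapping, so
+#
+#   get_docker_networks(['mynet'], {'mynet': 'abc123'})
+#   # -> [{'id': 'abc123'}]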
+
+
+def get_nanoseconds_from_raw_option(name, value):
+ if value is None:
+ return None
+ elif isinstance(value, int):
+ return value
+ elif isinstance(value, string_types):
+ try:
+ return int(value)
+ except ValueError:
+ return convert_duration_to_nanosecond(value)
+ else:
+ raise ValueError(
+ 'Invalid type for %s %s (%s). Only string or int allowed.'
+ % (name, value, type(value))
+ )
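+
+# Illustrative behaviour of get_nanoseconds_from_raw_option (not
+# executed): integers pass through unchanged, strings are parsed either
+# as plain integers or as duration expressions:
+#
+#   get_nanoseconds_from_raw_option('update_delay', 5000000000)  # -> 5000000000
+#   get_nanoseconds_from_raw_option('update_delay', '5s')        # -> 5000000000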
+
+
+def get_value(key, values, default=None):
+ value = values.get(key)
+ return value if value is not None else default
+
+
+def has_dict_changed(new_dict, old_dict):
+ """
+ Check if new_dict has differences compared to old_dict while
+ ignoring keys in old_dict which are None in new_dict.
+ """
+ if new_dict is None:
+ return False
+ if not new_dict and old_dict:
+ return True
+ if not old_dict and new_dict:
+ return True
+ defined_options = dict(
+ (option, value) for option, value in new_dict.items()
+ if value is not None
+ )
+ for option, value in defined_options.items():
+ old_value = old_dict.get(option)
+ if not value and not old_value:
+ continue
+ if value != old_value:
+ return True
+ return False
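+
+# Illustrative behaviour of has_dict_changed (not executed): None values
+# in the new dict are treated as "not specified" and ignored:
+#
+#   has_dict_changed({'uid': '0', 'gid': None}, {'uid': '0', 'gid': '99'})  # -> False
+#   has_dict_changed({'uid': '1'}, {'uid': '0'})                            # -> True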
+
+
+def has_list_changed(new_list, old_list, sort_lists=True, sort_key=None):
+ """
+ Check whether two lists have differences. Lists are sorted by default.
+ """
+
+ def sort_list(unsorted_list):
+ """
+ Sort a given list.
+ The list may contain dictionaries, so use the sort key to handle them.
+ """
+
+ if unsorted_list and isinstance(unsorted_list[0], dict):
+ if not sort_key:
+ raise Exception(
+ 'A sort key was not specified when sorting list'
+ )
+ else:
+ return sorted(unsorted_list, key=lambda k: k[sort_key])
+
+ # Either the list is empty or does not contain dictionaries
+ try:
+ return sorted(unsorted_list)
+ except TypeError:
+ return unsorted_list
+
+ if new_list is None:
+ return False
+ old_list = old_list or []
+ if len(new_list) != len(old_list):
+ return True
+
+ if sort_lists:
+ zip_data = zip(sort_list(new_list), sort_list(old_list))
+ else:
+ zip_data = zip(new_list, old_list)
+ for new_item, old_item in zip_data:
+ is_same_type = type(new_item) == type(old_item)
+ if not is_same_type:
+ if isinstance(new_item, string_types) and isinstance(old_item, string_types):
+ # Even though the types are different between these items,
+ # they are both strings. Try matching on the same string type.
+ try:
+ new_item_type = type(new_item)
+ old_item_casted = new_item_type(old_item)
+ if new_item != old_item_casted:
+ return True
+ else:
+ continue
+ except UnicodeEncodeError:
+ # Fallback to assuming the strings are different
+ return True
+ else:
+ return True
+ if isinstance(new_item, dict):
+ if has_dict_changed(new_item, old_item):
+ return True
+ elif new_item != old_item:
+ return True
+
+ return False
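+
+# Illustrative behaviour of has_list_changed (not executed):
+#
+#   has_list_changed(['b', 'a'], ['a', 'b'])                    # -> False (sorted first)
+#   has_list_changed(['b', 'a'], ['a', 'b'], sort_lists=False)  # -> True
+#   has_list_changed(None, ['a'])                               # -> False (no comparison requested)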
+
+
+def have_networks_changed(new_networks, old_networks):
+ """Special case list checking for networks to sort aliases"""
+
+ if new_networks is None:
+ return False
+ old_networks = old_networks or []
+ if len(new_networks) != len(old_networks):
+ return True
+
+ zip_data = zip(
+ sorted(new_networks, key=lambda k: k['id']),
+ sorted(old_networks, key=lambda k: k['id'])
+ )
+
+ for new_item, old_item in zip_data:
+ new_item = dict(new_item)
+ old_item = dict(old_item)
+ # Sort the aliases
+ if 'aliases' in new_item:
+ new_item['aliases'] = sorted(new_item['aliases'] or [])
+ if 'aliases' in old_item:
+ old_item['aliases'] = sorted(old_item['aliases'] or [])
+
+ if has_dict_changed(new_item, old_item):
+ return True
+
+ return False
+
+
+class DockerService(DockerBaseClass):
+ def __init__(self, docker_api_version, docker_py_version):
+ super(DockerService, self).__init__()
+ self.image = ""
+ self.command = None
+ self.args = None
+ self.endpoint_mode = None
+ self.dns = None
+ self.healthcheck = None
+ self.healthcheck_disabled = None
+ self.hostname = None
+ self.hosts = None
+ self.tty = None
+ self.dns_search = None
+ self.dns_options = None
+ self.env = None
+ self.force_update = None
+ self.groups = None
+ self.log_driver = None
+ self.log_driver_options = None
+ self.labels = None
+ self.container_labels = None
+ self.limit_cpu = None
+ self.limit_memory = None
+ self.reserve_cpu = None
+ self.reserve_memory = None
+ self.mode = "replicated"
+ self.user = None
+ self.mounts = None
+ self.configs = None
+ self.secrets = None
+ self.constraints = None
+ self.replicas_max_per_node = None
+ self.networks = None
+ self.stop_grace_period = None
+ self.stop_signal = None
+ self.publish = None
+ self.placement_preferences = None
+ self.replicas = -1
+ self.service_id = False
+ self.service_version = False
+ self.read_only = None
+ self.restart_policy = None
+ self.restart_policy_attempts = None
+ self.restart_policy_delay = None
+ self.restart_policy_window = None
+ self.rollback_config = None
+ self.update_delay = None
+ self.update_parallelism = None
+ self.update_failure_action = None
+ self.update_monitor = None
+ self.update_max_failure_ratio = None
+ self.update_order = None
+ self.working_dir = None
+ self.init = None
+ self.cap_add = None
+ self.cap_drop = None
+
+ self.docker_api_version = docker_api_version
+ self.docker_py_version = docker_py_version
+
+ def get_facts(self):
+ return {
+ 'image': self.image,
+ 'mounts': self.mounts,
+ 'configs': self.configs,
+ 'networks': self.networks,
+ 'command': self.command,
+ 'args': self.args,
+ 'tty': self.tty,
+ 'dns': self.dns,
+ 'dns_search': self.dns_search,
+ 'dns_options': self.dns_options,
+ 'healthcheck': self.healthcheck,
+ 'healthcheck_disabled': self.healthcheck_disabled,
+ 'hostname': self.hostname,
+ 'hosts': self.hosts,
+ 'env': self.env,
+ 'force_update': self.force_update,
+ 'groups': self.groups,
+ 'log_driver': self.log_driver,
+ 'log_driver_options': self.log_driver_options,
+ 'publish': self.publish,
+ 'constraints': self.constraints,
+ 'replicas_max_per_node': self.replicas_max_per_node,
+ 'placement_preferences': self.placement_preferences,
+ 'labels': self.labels,
+ 'container_labels': self.container_labels,
+ 'mode': self.mode,
+ 'replicas': self.replicas,
+ 'endpoint_mode': self.endpoint_mode,
+ 'restart_policy': self.restart_policy,
+ 'secrets': self.secrets,
+ 'stop_grace_period': self.stop_grace_period,
+ 'stop_signal': self.stop_signal,
+ 'limit_cpu': self.limit_cpu,
+ 'limit_memory': self.limit_memory,
+ 'read_only': self.read_only,
+ 'reserve_cpu': self.reserve_cpu,
+ 'reserve_memory': self.reserve_memory,
+ 'restart_policy_delay': self.restart_policy_delay,
+ 'restart_policy_attempts': self.restart_policy_attempts,
+ 'restart_policy_window': self.restart_policy_window,
+ 'rollback_config': self.rollback_config,
+ 'update_delay': self.update_delay,
+ 'update_parallelism': self.update_parallelism,
+ 'update_failure_action': self.update_failure_action,
+ 'update_monitor': self.update_monitor,
+ 'update_max_failure_ratio': self.update_max_failure_ratio,
+ 'update_order': self.update_order,
+ 'user': self.user,
+ 'working_dir': self.working_dir,
+ 'init': self.init,
+ 'cap_add': self.cap_add,
+ 'cap_drop': self.cap_drop,
+ }
+
+ @property
+ def can_update_networks(self):
+ # Before Docker API 1.29 adding/removing networks was not supported
+ return (
+ self.docker_api_version >= LooseVersion('1.29') and
+ self.docker_py_version >= LooseVersion('2.7')
+ )
+
+ @property
+ def can_use_task_template_networks(self):
+ # In Docker API 1.25 attaching networks to TaskTemplate is preferred over Spec
+ return self.docker_py_version >= LooseVersion('2.7')
+
+ @staticmethod
+ def get_restart_config_from_ansible_params(params):
+ restart_config = params['restart_config'] or {}
+ condition = get_value(
+ 'condition',
+ restart_config,
+ )
+ delay = get_value(
+ 'delay',
+ restart_config,
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'restart_policy_delay',
+ delay
+ )
+ max_attempts = get_value(
+ 'max_attempts',
+ restart_config,
+ )
+ window = get_value(
+ 'window',
+ restart_config,
+ )
+ window = get_nanoseconds_from_raw_option(
+ 'restart_policy_window',
+ window
+ )
+ return {
+ 'restart_policy': condition,
+ 'restart_policy_delay': delay,
+ 'restart_policy_attempts': max_attempts,
+ 'restart_policy_window': window
+ }
+
+ @staticmethod
+ def get_update_config_from_ansible_params(params):
+ update_config = params['update_config'] or {}
+ parallelism = get_value(
+ 'parallelism',
+ update_config,
+ )
+ delay = get_value(
+ 'delay',
+ update_config,
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'update_delay',
+ delay
+ )
+ failure_action = get_value(
+ 'failure_action',
+ update_config,
+ )
+ monitor = get_value(
+ 'monitor',
+ update_config,
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'update_monitor',
+ monitor
+ )
+ max_failure_ratio = get_value(
+ 'max_failure_ratio',
+ update_config,
+ )
+ order = get_value(
+ 'order',
+ update_config,
+ )
+ return {
+ 'update_parallelism': parallelism,
+ 'update_delay': delay,
+ 'update_failure_action': failure_action,
+ 'update_monitor': monitor,
+ 'update_max_failure_ratio': max_failure_ratio,
+ 'update_order': order
+ }
+
+ @staticmethod
+ def get_rollback_config_from_ansible_params(params):
+ if params['rollback_config'] is None:
+ return None
+ rollback_config = params['rollback_config'] or {}
+ delay = get_nanoseconds_from_raw_option(
+ 'rollback_config.delay',
+ rollback_config.get('delay')
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'rollback_config.monitor',
+ rollback_config.get('monitor')
+ )
+ return {
+ 'parallelism': rollback_config.get('parallelism'),
+ 'delay': delay,
+ 'failure_action': rollback_config.get('failure_action'),
+ 'monitor': monitor,
+ 'max_failure_ratio': rollback_config.get('max_failure_ratio'),
+ 'order': rollback_config.get('order'),
+
+ }
+
+ @staticmethod
+ def get_logging_from_ansible_params(params):
+ logging_config = params['logging'] or {}
+ driver = get_value(
+ 'driver',
+ logging_config,
+ )
+ options = get_value(
+ 'options',
+ logging_config,
+ )
+ return {
+ 'log_driver': driver,
+ 'log_driver_options': options,
+ }
+
+ @staticmethod
+ def get_limits_from_ansible_params(params):
+ limits = params['limits'] or {}
+ cpus = get_value(
+ 'cpus',
+ limits,
+ )
+ memory = get_value(
+ 'memory',
+ limits,
+ )
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert limit_memory to bytes: %s' % exc)
+ return {
+ 'limit_cpu': cpus,
+ 'limit_memory': memory,
+ }
+
+ @staticmethod
+ def get_reservations_from_ansible_params(params):
+ reservations = params['reservations'] or {}
+ cpus = get_value(
+ 'cpus',
+ reservations,
+ )
+ memory = get_value(
+ 'memory',
+ reservations,
+ )
+
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert reserve_memory to bytes: %s' % exc)
+ return {
+ 'reserve_cpu': cpus,
+ 'reserve_memory': memory,
+ }
+
+ @staticmethod
+ def get_placement_from_ansible_params(params):
+ placement = params['placement'] or {}
+ constraints = get_value(
+ 'constraints',
+ placement
+ )
+
+ preferences = placement.get('preferences')
+ replicas_max_per_node = get_value(
+ 'replicas_max_per_node',
+ placement
+ )
+
+ return {
+ 'constraints': constraints,
+ 'placement_preferences': preferences,
+ 'replicas_max_per_node': replicas_max_per_node,
+ }
+
+ @classmethod
+ def from_ansible_params(
+ cls,
+ ap,
+ old_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ docker_api_version,
+ docker_py_version,
+ ):
+ s = DockerService(docker_api_version, docker_py_version)
+ s.image = image_digest
+ s.args = ap['args']
+ s.endpoint_mode = ap['endpoint_mode']
+ s.dns = ap['dns']
+ s.dns_search = ap['dns_search']
+ s.dns_options = ap['dns_options']
+ s.healthcheck, s.healthcheck_disabled = parse_healthcheck(ap['healthcheck'])
+ s.hostname = ap['hostname']
+ s.hosts = ap['hosts']
+ s.tty = ap['tty']
+ s.labels = ap['labels']
+ s.container_labels = ap['container_labels']
+ s.mode = ap['mode']
+ s.stop_signal = ap['stop_signal']
+ s.user = ap['user']
+ s.working_dir = ap['working_dir']
+ s.read_only = ap['read_only']
+ s.init = ap['init']
+ s.cap_add = ap['cap_add']
+ s.cap_drop = ap['cap_drop']
+
+ s.networks = get_docker_networks(ap['networks'], network_ids)
+
+ s.command = ap['command']
+ if isinstance(s.command, string_types):
+ s.command = shlex.split(s.command)
+ elif isinstance(s.command, list):
+ invalid_items = [
+ (index, item)
+ for index, item in enumerate(s.command)
+ if not isinstance(item, string_types)
+ ]
+ if invalid_items:
+ errors = ', '.join(
+ [
+ '%s (%s) at index %s' % (item, type(item), index)
+ for index, item in invalid_items
+ ]
+ )
+ raise Exception(
+ 'All items in a command list need to be strings. '
+ 'Check quoting. Invalid items: %s.'
+ % errors
+ )
+ s.command = ap['command']
+ elif s.command is not None:
+ raise ValueError(
+ 'Invalid type for command %s (%s). '
+ 'Only string or list allowed. Check quoting.'
+ % (s.command, type(s.command))
+ )
+
+ s.env = get_docker_environment(ap['env'], ap['env_files'])
+ s.rollback_config = cls.get_rollback_config_from_ansible_params(ap)
+
+ update_config = cls.get_update_config_from_ansible_params(ap)
+ for key, value in update_config.items():
+ setattr(s, key, value)
+
+ restart_config = cls.get_restart_config_from_ansible_params(ap)
+ for key, value in restart_config.items():
+ setattr(s, key, value)
+
+ logging_config = cls.get_logging_from_ansible_params(ap)
+ for key, value in logging_config.items():
+ setattr(s, key, value)
+
+ limits = cls.get_limits_from_ansible_params(ap)
+ for key, value in limits.items():
+ setattr(s, key, value)
+
+ reservations = cls.get_reservations_from_ansible_params(ap)
+ for key, value in reservations.items():
+ setattr(s, key, value)
+
+ placement = cls.get_placement_from_ansible_params(ap)
+ for key, value in placement.items():
+ setattr(s, key, value)
+
+ if ap['stop_grace_period'] is not None:
+ s.stop_grace_period = convert_duration_to_nanosecond(ap['stop_grace_period'])
+
+ if ap['force_update']:
+ s.force_update = int(str(time.time()).replace('.', ''))
+
+ if ap['groups'] is not None:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ s.groups = [str(g) for g in ap['groups']]
+
+ if ap['replicas'] == -1:
+ if old_service:
+ s.replicas = old_service.replicas
+ else:
+ s.replicas = 1
+ else:
+ s.replicas = ap['replicas']
+
+ if ap['publish'] is not None:
+ s.publish = []
+ for param_p in ap['publish']:
+ service_p = {}
+ service_p['protocol'] = param_p['protocol']
+ service_p['mode'] = param_p['mode']
+ service_p['published_port'] = param_p['published_port']
+ service_p['target_port'] = param_p['target_port']
+ s.publish.append(service_p)
+
+ if ap['mounts'] is not None:
+ s.mounts = []
+ for param_m in ap['mounts']:
+ service_m = {}
+ service_m['readonly'] = param_m['readonly']
+ service_m['type'] = param_m['type']
+ if param_m['source'] is None and param_m['type'] != 'tmpfs':
+ raise ValueError('Source must be specified for mounts which are not of type tmpfs')
+ service_m['source'] = param_m['source'] or ''
+ service_m['target'] = param_m['target']
+ service_m['labels'] = param_m['labels']
+ service_m['no_copy'] = param_m['no_copy']
+ service_m['propagation'] = param_m['propagation']
+ service_m['driver_config'] = param_m['driver_config']
+ service_m['tmpfs_mode'] = param_m['tmpfs_mode']
+ tmpfs_size = param_m['tmpfs_size']
+ if tmpfs_size is not None:
+ try:
+ tmpfs_size = human_to_bytes(tmpfs_size)
+ except ValueError as exc:
+ raise ValueError(
+ 'Failed to convert tmpfs_size to bytes: %s' % exc
+ )
+
+ service_m['tmpfs_size'] = tmpfs_size
+ s.mounts.append(service_m)
+
+ if ap['configs'] is not None:
+ s.configs = []
+ for param_m in ap['configs']:
+ service_c = {}
+ config_name = param_m['config_name']
+ service_c['config_id'] = param_m['config_id'] or config_ids[config_name]
+ service_c['config_name'] = config_name
+ service_c['filename'] = param_m['filename'] or config_name
+ service_c['uid'] = param_m['uid']
+ service_c['gid'] = param_m['gid']
+ service_c['mode'] = param_m['mode']
+ s.configs.append(service_c)
+
+ if ap['secrets'] is not None:
+ s.secrets = []
+ for param_m in ap['secrets']:
+ service_s = {}
+ secret_name = param_m['secret_name']
+ service_s['secret_id'] = param_m['secret_id'] or secret_ids[secret_name]
+ service_s['secret_name'] = secret_name
+ service_s['filename'] = param_m['filename'] or secret_name
+ service_s['uid'] = param_m['uid']
+ service_s['gid'] = param_m['gid']
+ service_s['mode'] = param_m['mode']
+ s.secrets.append(service_s)
+
+ return s
+
+ def compare(self, os):
+ differences = DifferenceTracker()
+ needs_rebuild = False
+ force_update = False
+ if self.endpoint_mode is not None and self.endpoint_mode != os.endpoint_mode:
+ differences.add('endpoint_mode', parameter=self.endpoint_mode, active=os.endpoint_mode)
+ if has_list_changed(self.env, os.env):
+ differences.add('env', parameter=self.env, active=os.env)
+ if self.log_driver is not None and self.log_driver != os.log_driver:
+ differences.add('log_driver', parameter=self.log_driver, active=os.log_driver)
+ if self.log_driver_options is not None and self.log_driver_options != (os.log_driver_options or {}):
+ differences.add('log_opt', parameter=self.log_driver_options, active=os.log_driver_options)
+ if self.mode != os.mode:
+ needs_rebuild = True
+ differences.add('mode', parameter=self.mode, active=os.mode)
+ if has_list_changed(self.mounts, os.mounts, sort_key='target'):
+ differences.add('mounts', parameter=self.mounts, active=os.mounts)
+ if has_list_changed(self.configs, os.configs, sort_key='config_name'):
+ differences.add('configs', parameter=self.configs, active=os.configs)
+ if has_list_changed(self.secrets, os.secrets, sort_key='secret_name'):
+ differences.add('secrets', parameter=self.secrets, active=os.secrets)
+ if have_networks_changed(self.networks, os.networks):
+ differences.add('networks', parameter=self.networks, active=os.networks)
+ needs_rebuild = not self.can_update_networks
+ if self.replicas != os.replicas:
+ differences.add('replicas', parameter=self.replicas, active=os.replicas)
+ if has_list_changed(self.command, os.command, sort_lists=False):
+ differences.add('command', parameter=self.command, active=os.command)
+ if has_list_changed(self.args, os.args, sort_lists=False):
+ differences.add('args', parameter=self.args, active=os.args)
+ if has_list_changed(self.constraints, os.constraints):
+ differences.add('constraints', parameter=self.constraints, active=os.constraints)
+ if self.replicas_max_per_node is not None and self.replicas_max_per_node != os.replicas_max_per_node:
+ differences.add('replicas_max_per_node', parameter=self.replicas_max_per_node, active=os.replicas_max_per_node)
+ if has_list_changed(self.placement_preferences, os.placement_preferences, sort_lists=False):
+ differences.add('placement_preferences', parameter=self.placement_preferences, active=os.placement_preferences)
+ if has_list_changed(self.groups, os.groups):
+ differences.add('groups', parameter=self.groups, active=os.groups)
+ if self.labels is not None and self.labels != (os.labels or {}):
+ differences.add('labels', parameter=self.labels, active=os.labels)
+ if self.limit_cpu is not None and self.limit_cpu != os.limit_cpu:
+ differences.add('limit_cpu', parameter=self.limit_cpu, active=os.limit_cpu)
+ if self.limit_memory is not None and self.limit_memory != os.limit_memory:
+ differences.add('limit_memory', parameter=self.limit_memory, active=os.limit_memory)
+ if self.reserve_cpu is not None and self.reserve_cpu != os.reserve_cpu:
+ differences.add('reserve_cpu', parameter=self.reserve_cpu, active=os.reserve_cpu)
+ if self.reserve_memory is not None and self.reserve_memory != os.reserve_memory:
+ differences.add('reserve_memory', parameter=self.reserve_memory, active=os.reserve_memory)
+ if self.container_labels is not None and self.container_labels != (os.container_labels or {}):
+ differences.add('container_labels', parameter=self.container_labels, active=os.container_labels)
+ if self.stop_signal is not None and self.stop_signal != os.stop_signal:
+ differences.add('stop_signal', parameter=self.stop_signal, active=os.stop_signal)
+ if self.stop_grace_period is not None and self.stop_grace_period != os.stop_grace_period:
+ differences.add('stop_grace_period', parameter=self.stop_grace_period, active=os.stop_grace_period)
+ if self.has_publish_changed(os.publish):
+ differences.add('publish', parameter=self.publish, active=os.publish)
+ if self.read_only is not None and self.read_only != os.read_only:
+ differences.add('read_only', parameter=self.read_only, active=os.read_only)
+ if self.restart_policy is not None and self.restart_policy != os.restart_policy:
+ differences.add('restart_policy', parameter=self.restart_policy, active=os.restart_policy)
+ if self.restart_policy_attempts is not None and self.restart_policy_attempts != os.restart_policy_attempts:
+ differences.add('restart_policy_attempts', parameter=self.restart_policy_attempts, active=os.restart_policy_attempts)
+ if self.restart_policy_delay is not None and self.restart_policy_delay != os.restart_policy_delay:
+ differences.add('restart_policy_delay', parameter=self.restart_policy_delay, active=os.restart_policy_delay)
+ if self.restart_policy_window is not None and self.restart_policy_window != os.restart_policy_window:
+ differences.add('restart_policy_window', parameter=self.restart_policy_window, active=os.restart_policy_window)
+ if has_dict_changed(self.rollback_config, os.rollback_config):
+ differences.add('rollback_config', parameter=self.rollback_config, active=os.rollback_config)
+ if self.update_delay is not None and self.update_delay != os.update_delay:
+ differences.add('update_delay', parameter=self.update_delay, active=os.update_delay)
+ if self.update_parallelism is not None and self.update_parallelism != os.update_parallelism:
+ differences.add('update_parallelism', parameter=self.update_parallelism, active=os.update_parallelism)
+ if self.update_failure_action is not None and self.update_failure_action != os.update_failure_action:
+ differences.add('update_failure_action', parameter=self.update_failure_action, active=os.update_failure_action)
+ if self.update_monitor is not None and self.update_monitor != os.update_monitor:
+ differences.add('update_monitor', parameter=self.update_monitor, active=os.update_monitor)
+ if self.update_max_failure_ratio is not None and self.update_max_failure_ratio != os.update_max_failure_ratio:
+ differences.add('update_max_failure_ratio', parameter=self.update_max_failure_ratio, active=os.update_max_failure_ratio)
+ if self.update_order is not None and self.update_order != os.update_order:
+ differences.add('update_order', parameter=self.update_order, active=os.update_order)
+ has_image_changed, change = self.has_image_changed(os.image)
+ if has_image_changed:
+ differences.add('image', parameter=self.image, active=change)
+ if self.user and self.user != os.user:
+ differences.add('user', parameter=self.user, active=os.user)
+ if has_list_changed(self.dns, os.dns, sort_lists=False):
+ differences.add('dns', parameter=self.dns, active=os.dns)
+ if has_list_changed(self.dns_search, os.dns_search, sort_lists=False):
+ differences.add('dns_search', parameter=self.dns_search, active=os.dns_search)
+ if has_list_changed(self.dns_options, os.dns_options):
+ differences.add('dns_options', parameter=self.dns_options, active=os.dns_options)
+ if self.has_healthcheck_changed(os):
+ differences.add('healthcheck', parameter=self.healthcheck, active=os.healthcheck)
+ if self.hostname is not None and self.hostname != os.hostname:
+ differences.add('hostname', parameter=self.hostname, active=os.hostname)
+ if self.hosts is not None and self.hosts != (os.hosts or {}):
+ differences.add('hosts', parameter=self.hosts, active=os.hosts)
+ if self.tty is not None and self.tty != os.tty:
+ differences.add('tty', parameter=self.tty, active=os.tty)
+ if self.working_dir is not None and self.working_dir != os.working_dir:
+ differences.add('working_dir', parameter=self.working_dir, active=os.working_dir)
+ if self.force_update:
+ force_update = True
+ if self.init is not None and self.init != os.init:
+ differences.add('init', parameter=self.init, active=os.init)
+ if has_list_changed(self.cap_add, os.cap_add):
+ differences.add('cap_add', parameter=self.cap_add, active=os.cap_add)
+ if has_list_changed(self.cap_drop, os.cap_drop):
+ differences.add('cap_drop', parameter=self.cap_drop, active=os.cap_drop)
+ return not differences.empty or force_update, differences, needs_rebuild, force_update
+
+ def has_healthcheck_changed(self, old_service):
+ if self.healthcheck_disabled is False and self.healthcheck is None:
+ return False
+ if self.healthcheck_disabled:
+ if old_service.healthcheck is None:
+ return False
+ if old_service.healthcheck.get('test') == ['NONE']:
+ return False
+ return self.healthcheck != old_service.healthcheck
+
+ def has_publish_changed(self, old_publish):
+ if self.publish is None:
+ return False
+ old_publish = old_publish or []
+ if len(self.publish) != len(old_publish):
+ return True
+
+ def publish_sorter(item):
+ return (item.get('published_port') or 0, item.get('target_port') or 0, item.get('protocol') or '')
+
+ publish = sorted(self.publish, key=publish_sorter)
+ old_publish = sorted(old_publish, key=publish_sorter)
+ for publish_item, old_publish_item in zip(publish, old_publish):
+ ignored_keys = set()
+ if not publish_item.get('mode'):
+ ignored_keys.add('mode')
+ # Create copies of publish_item dicts where keys specified in ignored_keys are left out
+ filtered_old_publish_item = dict(
+ (k, v) for k, v in old_publish_item.items() if k not in ignored_keys
+ )
+ filtered_publish_item = dict(
+ (k, v) for k, v in publish_item.items() if k not in ignored_keys
+ )
+ if filtered_publish_item != filtered_old_publish_item:
+ return True
+ return False
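+
+ # Illustrative behaviour of has_publish_changed (not executed): a
+ # configured port without an explicit mode matches regardless of the
+ # mode reported by the daemon, because 'mode' is dropped from both
+ # sides of the comparison:
+ #
+ #   new: {'published_port': 8080, 'target_port': 80, 'protocol': 'tcp', 'mode': None}
+ #   old: {'published_port': 8080, 'target_port': 80, 'protocol': 'tcp', 'mode': 'ingress'}
+ #   -> no difference is reported for this item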
+
+ def has_image_changed(self, old_image):
+ if '@' not in self.image:
+ old_image = old_image.split('@')[0]
+ return self.image != old_image, old_image
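+
+ # Illustrative behaviour of has_image_changed (not executed): when the
+ # requested image carries no digest, any digest on the currently
+ # running image is stripped before comparing, so a service running
+ # 'alpine:latest@sha256:...' is unchanged for a requested 'alpine:latest'.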
+
+ def build_container_spec(self):
+ mounts = None
+ if self.mounts is not None:
+ mounts = []
+ for mount_config in self.mounts:
+ mount_options = {
+ 'target': 'target',
+ 'source': 'source',
+ 'type': 'type',
+ 'readonly': 'read_only',
+ 'propagation': 'propagation',
+ 'labels': 'labels',
+ 'no_copy': 'no_copy',
+ 'driver_config': 'driver_config',
+ 'tmpfs_size': 'tmpfs_size',
+ 'tmpfs_mode': 'tmpfs_mode'
+ }
+ mount_args = {}
+ for option, mount_arg in mount_options.items():
+ value = mount_config.get(option)
+ if value is not None:
+ mount_args[mount_arg] = value
+
+ mounts.append(types.Mount(**mount_args))
+
+ configs = None
+ if self.configs is not None:
+ configs = []
+ for config_config in self.configs:
+ config_args = {
+ 'config_id': config_config['config_id'],
+ 'config_name': config_config['config_name']
+ }
+ filename = config_config.get('filename')
+ if filename:
+ config_args['filename'] = filename
+ uid = config_config.get('uid')
+ if uid:
+ config_args['uid'] = uid
+ gid = config_config.get('gid')
+ if gid:
+ config_args['gid'] = gid
+ mode = config_config.get('mode')
+ if mode:
+ config_args['mode'] = mode
+
+ configs.append(types.ConfigReference(**config_args))
+
+ secrets = None
+ if self.secrets is not None:
+ secrets = []
+ for secret_config in self.secrets:
+ secret_args = {
+ 'secret_id': secret_config['secret_id'],
+ 'secret_name': secret_config['secret_name']
+ }
+ filename = secret_config.get('filename')
+ if filename:
+ secret_args['filename'] = filename
+ uid = secret_config.get('uid')
+ if uid:
+ secret_args['uid'] = uid
+ gid = secret_config.get('gid')
+ if gid:
+ secret_args['gid'] = gid
+ mode = secret_config.get('mode')
+ if mode:
+ secret_args['mode'] = mode
+
+ secrets.append(types.SecretReference(**secret_args))
+
+ dns_config_args = {}
+ if self.dns is not None:
+ dns_config_args['nameservers'] = self.dns
+ if self.dns_search is not None:
+ dns_config_args['search'] = self.dns_search
+ if self.dns_options is not None:
+ dns_config_args['options'] = self.dns_options
+ dns_config = types.DNSConfig(**dns_config_args) if dns_config_args else None
+
+ container_spec_args = {}
+ if self.command is not None:
+ container_spec_args['command'] = self.command
+ if self.args is not None:
+ container_spec_args['args'] = self.args
+ if self.env is not None:
+ container_spec_args['env'] = self.env
+ if self.user is not None:
+ container_spec_args['user'] = self.user
+ if self.container_labels is not None:
+ container_spec_args['labels'] = self.container_labels
+ if self.healthcheck is not None:
+ container_spec_args['healthcheck'] = types.Healthcheck(**self.healthcheck)
+ elif self.healthcheck_disabled:
+ container_spec_args['healthcheck'] = types.Healthcheck(test=['NONE'])
+ if self.hostname is not None:
+ container_spec_args['hostname'] = self.hostname
+ if self.hosts is not None:
+ container_spec_args['hosts'] = self.hosts
+ if self.read_only is not None:
+ container_spec_args['read_only'] = self.read_only
+ if self.stop_grace_period is not None:
+ container_spec_args['stop_grace_period'] = self.stop_grace_period
+ if self.stop_signal is not None:
+ container_spec_args['stop_signal'] = self.stop_signal
+ if self.tty is not None:
+ container_spec_args['tty'] = self.tty
+ if self.groups is not None:
+ container_spec_args['groups'] = self.groups
+ if self.working_dir is not None:
+ container_spec_args['workdir'] = self.working_dir
+ if secrets is not None:
+ container_spec_args['secrets'] = secrets
+ if mounts is not None:
+ container_spec_args['mounts'] = mounts
+ if dns_config is not None:
+ container_spec_args['dns_config'] = dns_config
+ if configs is not None:
+ container_spec_args['configs'] = configs
+ if self.init is not None:
+ container_spec_args['init'] = self.init
+ if self.cap_add is not None:
+ container_spec_args['cap_add'] = self.cap_add
+ if self.cap_drop is not None:
+ container_spec_args['cap_drop'] = self.cap_drop
+
+ return types.ContainerSpec(self.image, **container_spec_args)
+
+ def build_placement(self):
+ placement_args = {}
+ if self.constraints is not None:
+ placement_args['constraints'] = self.constraints
+ if self.replicas_max_per_node is not None:
+ placement_args['maxreplicas'] = self.replicas_max_per_node
+ if self.placement_preferences is not None:
+ placement_args['preferences'] = [
+ {key.title(): {'SpreadDescriptor': value}}
+ for preference in self.placement_preferences
+ for key, value in preference.items()
+ ]
+ return types.Placement(**placement_args) if placement_args else None
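+
+ # Illustrative mapping performed by build_placement (not executed): a
+ # preference like {'spread': 'node.labels.mylabel'} is translated to
+ # {'Spread': {'SpreadDescriptor': 'node.labels.mylabel'}} in the API payload.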
+
+ def build_update_config(self):
+ update_config_args = {}
+ if self.update_parallelism is not None:
+ update_config_args['parallelism'] = self.update_parallelism
+ if self.update_delay is not None:
+ update_config_args['delay'] = self.update_delay
+ if self.update_failure_action is not None:
+ update_config_args['failure_action'] = self.update_failure_action
+ if self.update_monitor is not None:
+ update_config_args['monitor'] = self.update_monitor
+ if self.update_max_failure_ratio is not None:
+ update_config_args['max_failure_ratio'] = self.update_max_failure_ratio
+ if self.update_order is not None:
+ update_config_args['order'] = self.update_order
+ return types.UpdateConfig(**update_config_args) if update_config_args else None
+
+ def build_log_driver(self):
+ log_driver_args = {}
+ if self.log_driver is not None:
+ log_driver_args['name'] = self.log_driver
+ if self.log_driver_options is not None:
+ log_driver_args['options'] = self.log_driver_options
+ return types.DriverConfig(**log_driver_args) if log_driver_args else None
+
+ def build_restart_policy(self):
+ restart_policy_args = {}
+ if self.restart_policy is not None:
+ restart_policy_args['condition'] = self.restart_policy
+ if self.restart_policy_delay is not None:
+ restart_policy_args['delay'] = self.restart_policy_delay
+ if self.restart_policy_attempts is not None:
+ restart_policy_args['max_attempts'] = self.restart_policy_attempts
+ if self.restart_policy_window is not None:
+ restart_policy_args['window'] = self.restart_policy_window
+ return types.RestartPolicy(**restart_policy_args) if restart_policy_args else None
+
+ def build_rollback_config(self):
+ if self.rollback_config is None:
+ return None
+ rollback_config_options = [
+ 'parallelism',
+ 'delay',
+ 'failure_action',
+ 'monitor',
+ 'max_failure_ratio',
+ 'order',
+ ]
+ rollback_config_args = {}
+ for option in rollback_config_options:
+ value = self.rollback_config.get(option)
+ if value is not None:
+ rollback_config_args[option] = value
+ return types.RollbackConfig(**rollback_config_args) if rollback_config_args else None
+
+ def build_resources(self):
+ resources_args = {}
+ if self.limit_cpu is not None:
+ resources_args['cpu_limit'] = int(self.limit_cpu * 1000000000.0)
+ if self.limit_memory is not None:
+ resources_args['mem_limit'] = self.limit_memory
+ if self.reserve_cpu is not None:
+ resources_args['cpu_reservation'] = int(self.reserve_cpu * 1000000000.0)
+ if self.reserve_memory is not None:
+ resources_args['mem_reservation'] = self.reserve_memory
+ return types.Resources(**resources_args) if resources_args else None
+
+ def build_task_template(self, container_spec, placement=None):
+ log_driver = self.build_log_driver()
+ restart_policy = self.build_restart_policy()
+ resources = self.build_resources()
+
+ task_template_args = {}
+ if placement is not None:
+ task_template_args['placement'] = placement
+ if log_driver is not None:
+ task_template_args['log_driver'] = log_driver
+ if restart_policy is not None:
+ task_template_args['restart_policy'] = restart_policy
+ if resources is not None:
+ task_template_args['resources'] = resources
+ if self.force_update:
+ task_template_args['force_update'] = self.force_update
+ if self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ task_template_args['networks'] = networks
+ return types.TaskTemplate(container_spec=container_spec, **task_template_args)
+
+ def build_service_mode(self):
+ if self.mode == 'global':
+ self.replicas = None
+ return types.ServiceMode(self.mode, replicas=self.replicas)
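+
+ # Note on build_service_mode: a global service carries no replica
+ # count, so replicas is reset to None above; mode='global' therefore
+ # yields types.ServiceMode('global', replicas=None).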
+
+ def build_networks(self):
+ networks = None
+ if self.networks is not None:
+ networks = []
+ for network in self.networks:
+ docker_network = {'Target': network['id']}
+ if 'aliases' in network:
+ docker_network['Aliases'] = network['aliases']
+ if 'options' in network:
+ docker_network['DriverOpts'] = network['options']
+ networks.append(docker_network)
+ return networks
+
+ def build_endpoint_spec(self):
+ endpoint_spec_args = {}
+ if self.publish is not None:
+ ports = []
+ for port in self.publish:
+ port_spec = {
+ 'Protocol': port['protocol'],
+ 'TargetPort': port['target_port']
+ }
+ if port.get('published_port'):
+ port_spec['PublishedPort'] = port['published_port']
+ if port.get('mode'):
+ port_spec['PublishMode'] = port['mode']
+ ports.append(port_spec)
+ endpoint_spec_args['ports'] = ports
+ if self.endpoint_mode is not None:
+ endpoint_spec_args['mode'] = self.endpoint_mode
+ return types.EndpointSpec(**endpoint_spec_args) if endpoint_spec_args else None
+
+ def build_docker_service(self):
+ container_spec = self.build_container_spec()
+ placement = self.build_placement()
+ task_template = self.build_task_template(container_spec, placement)
+
+ update_config = self.build_update_config()
+ rollback_config = self.build_rollback_config()
+ service_mode = self.build_service_mode()
+ endpoint_spec = self.build_endpoint_spec()
+
+ service = {'task_template': task_template, 'mode': service_mode}
+ if update_config:
+ service['update_config'] = update_config
+ if rollback_config:
+ service['rollback_config'] = rollback_config
+ if endpoint_spec:
+ service['endpoint_spec'] = endpoint_spec
+ if self.labels:
+ service['labels'] = self.labels
+ if not self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ service['networks'] = networks
+ return service
+
+
+class DockerServiceManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.retries = 2
+ self.diff_tracker = None
+
+ def get_service(self, name):
+ try:
+ raw_data = self.client.inspect_service(name)
+ except NotFound:
+ return None
+ ds = DockerService(self.client.docker_api_version, self.client.docker_py_version)
+
+ task_template_data = raw_data['Spec']['TaskTemplate']
+ ds.image = task_template_data['ContainerSpec']['Image']
+ ds.user = task_template_data['ContainerSpec'].get('User')
+ ds.env = task_template_data['ContainerSpec'].get('Env')
+ ds.command = task_template_data['ContainerSpec'].get('Command')
+ ds.args = task_template_data['ContainerSpec'].get('Args')
+ ds.groups = task_template_data['ContainerSpec'].get('Groups')
+ ds.stop_grace_period = task_template_data['ContainerSpec'].get('StopGracePeriod')
+ ds.stop_signal = task_template_data['ContainerSpec'].get('StopSignal')
+ ds.working_dir = task_template_data['ContainerSpec'].get('Dir')
+ ds.read_only = task_template_data['ContainerSpec'].get('ReadOnly')
+ ds.cap_add = task_template_data['ContainerSpec'].get('CapabilityAdd')
+ ds.cap_drop = task_template_data['ContainerSpec'].get('CapabilityDrop')
+
+ healthcheck_data = task_template_data['ContainerSpec'].get('Healthcheck')
+ if healthcheck_data:
+ options = {
+ 'Test': 'test',
+ 'Interval': 'interval',
+ 'Timeout': 'timeout',
+ 'StartPeriod': 'start_period',
+ 'Retries': 'retries'
+ }
+ healthcheck = dict(
+ (options[key], value) for key, value in healthcheck_data.items()
+ if value is not None and key in options
+ )
+ ds.healthcheck = healthcheck
+
+ update_config_data = raw_data['Spec'].get('UpdateConfig')
+ if update_config_data:
+ ds.update_delay = update_config_data.get('Delay')
+ ds.update_parallelism = update_config_data.get('Parallelism')
+ ds.update_failure_action = update_config_data.get('FailureAction')
+ ds.update_monitor = update_config_data.get('Monitor')
+ ds.update_max_failure_ratio = update_config_data.get('MaxFailureRatio')
+ ds.update_order = update_config_data.get('Order')
+
+ rollback_config_data = raw_data['Spec'].get('RollbackConfig')
+ if rollback_config_data:
+ ds.rollback_config = {
+ 'parallelism': rollback_config_data.get('Parallelism'),
+ 'delay': rollback_config_data.get('Delay'),
+ 'failure_action': rollback_config_data.get('FailureAction'),
+ 'monitor': rollback_config_data.get('Monitor'),
+ 'max_failure_ratio': rollback_config_data.get('MaxFailureRatio'),
+ 'order': rollback_config_data.get('Order'),
+ }
+
+ dns_config = task_template_data['ContainerSpec'].get('DNSConfig')
+ if dns_config:
+ ds.dns = dns_config.get('Nameservers')
+ ds.dns_search = dns_config.get('Search')
+ ds.dns_options = dns_config.get('Options')
+
+ ds.hostname = task_template_data['ContainerSpec'].get('Hostname')
+
+ hosts = task_template_data['ContainerSpec'].get('Hosts')
+ if hosts:
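+            # Entries may appear either as 'hostname:IP' or in /etc/hosts order
+            # as 'IP hostname'; normalize both forms to (IP, hostname) pairs.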
+ hosts = [
+ list(reversed(host.split(":", 1)))
+ if ":" in host
+ else host.split(" ", 1)
+ for host in hosts
+ ]
+ ds.hosts = dict((hostname, ip) for ip, hostname in hosts)
+ ds.tty = task_template_data['ContainerSpec'].get('TTY')
+
+ placement = task_template_data.get('Placement')
+ if placement:
+ ds.constraints = placement.get('Constraints')
+ ds.replicas_max_per_node = placement.get('MaxReplicas')
+ placement_preferences = []
+ for preference in placement.get('Preferences', []):
+ placement_preferences.append(
+ dict(
+ (key.lower(), value['SpreadDescriptor'])
+ for key, value in preference.items()
+ )
+ )
+ ds.placement_preferences = placement_preferences or None
+
+ restart_policy_data = task_template_data.get('RestartPolicy')
+ if restart_policy_data:
+ ds.restart_policy = restart_policy_data.get('Condition')
+ ds.restart_policy_delay = restart_policy_data.get('Delay')
+ ds.restart_policy_attempts = restart_policy_data.get('MaxAttempts')
+ ds.restart_policy_window = restart_policy_data.get('Window')
+
+ raw_data_endpoint_spec = raw_data['Spec'].get('EndpointSpec')
+ if raw_data_endpoint_spec:
+ ds.endpoint_mode = raw_data_endpoint_spec.get('Mode')
+ raw_data_ports = raw_data_endpoint_spec.get('Ports')
+ if raw_data_ports:
+ ds.publish = []
+ for port in raw_data_ports:
+ ds.publish.append({
+ 'protocol': port['Protocol'],
+ 'mode': port.get('PublishMode', None),
+ 'published_port': port.get('PublishedPort', None),
+ 'target_port': int(port['TargetPort'])
+ })
+
+ raw_data_limits = task_template_data.get('Resources', {}).get('Limits')
+ if raw_data_limits:
+ raw_cpu_limits = raw_data_limits.get('NanoCPUs')
+ if raw_cpu_limits:
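+                # The API reports CPU values in NanoCPUs (10^-9 CPUs).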
+ ds.limit_cpu = float(raw_cpu_limits) / 1000000000
+
+ raw_memory_limits = raw_data_limits.get('MemoryBytes')
+ if raw_memory_limits:
+ ds.limit_memory = int(raw_memory_limits)
+
+ raw_data_reservations = task_template_data.get('Resources', {}).get('Reservations')
+ if raw_data_reservations:
+ raw_cpu_reservations = raw_data_reservations.get('NanoCPUs')
+ if raw_cpu_reservations:
+ ds.reserve_cpu = float(raw_cpu_reservations) / 1000000000
+
+ raw_memory_reservations = raw_data_reservations.get('MemoryBytes')
+ if raw_memory_reservations:
+ ds.reserve_memory = int(raw_memory_reservations)
+
+ ds.labels = raw_data['Spec'].get('Labels')
+ ds.log_driver = task_template_data.get('LogDriver', {}).get('Name')
+ ds.log_driver_options = task_template_data.get('LogDriver', {}).get('Options')
+ ds.container_labels = task_template_data['ContainerSpec'].get('Labels')
+
+ mode = raw_data['Spec']['Mode']
+ if 'Replicated' in mode.keys():
+ ds.mode = to_text('replicated', encoding='utf-8')
+ ds.replicas = mode['Replicated']['Replicas']
+ elif 'Global' in mode.keys():
+ ds.mode = 'global'
+ else:
+ raise Exception('Unknown service mode: %s' % mode)
+
+ raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts')
+ if raw_data_mounts:
+ ds.mounts = []
+ for mount_data in raw_data_mounts:
+ bind_options = mount_data.get('BindOptions', {})
+ volume_options = mount_data.get('VolumeOptions', {})
+ tmpfs_options = mount_data.get('TmpfsOptions', {})
+ driver_config = volume_options.get('DriverConfig', {})
+ driver_config = dict(
+ (key.lower(), value) for key, value in driver_config.items()
+ ) or None
+ ds.mounts.append({
+ 'source': mount_data.get('Source', ''),
+ 'type': mount_data['Type'],
+ 'target': mount_data['Target'],
+ 'readonly': mount_data.get('ReadOnly'),
+ 'propagation': bind_options.get('Propagation'),
+ 'no_copy': volume_options.get('NoCopy'),
+ 'labels': volume_options.get('Labels'),
+ 'driver_config': driver_config,
+ 'tmpfs_mode': tmpfs_options.get('Mode'),
+ 'tmpfs_size': tmpfs_options.get('SizeBytes'),
+ })
+
+ raw_data_configs = task_template_data['ContainerSpec'].get('Configs')
+ if raw_data_configs:
+ ds.configs = []
+ for config_data in raw_data_configs:
+ ds.configs.append({
+ 'config_id': config_data['ConfigID'],
+ 'config_name': config_data['ConfigName'],
+ 'filename': config_data['File'].get('Name'),
+ 'uid': config_data['File'].get('UID'),
+ 'gid': config_data['File'].get('GID'),
+ 'mode': config_data['File'].get('Mode')
+ })
+
+ raw_data_secrets = task_template_data['ContainerSpec'].get('Secrets')
+ if raw_data_secrets:
+ ds.secrets = []
+ for secret_data in raw_data_secrets:
+ ds.secrets.append({
+ 'secret_id': secret_data['SecretID'],
+ 'secret_name': secret_data['SecretName'],
+ 'filename': secret_data['File'].get('Name'),
+ 'uid': secret_data['File'].get('UID'),
+ 'gid': secret_data['File'].get('GID'),
+ 'mode': secret_data['File'].get('Mode')
+ })
+
+ raw_networks_data = task_template_data.get('Networks', raw_data['Spec'].get('Networks'))
+ if raw_networks_data:
+ ds.networks = []
+ for network_data in raw_networks_data:
+ network = {'id': network_data['Target']}
+ if 'Aliases' in network_data:
+ network['aliases'] = network_data['Aliases']
+ if 'DriverOpts' in network_data:
+ network['options'] = network_data['DriverOpts']
+ ds.networks.append(network)
+ ds.service_version = raw_data['Version']['Index']
+ ds.service_id = raw_data['ID']
+
+ ds.init = task_template_data['ContainerSpec'].get('Init', False)
+ return ds
+
+ def update_service(self, name, old_service, new_service):
+ service_data = new_service.build_docker_service()
+ result = self.client.update_service(
+ old_service.service_id,
+ old_service.service_version,
+ name=name,
+ **service_data
+ )
+        # Prior to Docker SDK for Python 4.0.0, update_service() did not return
+        # warnings, so in that case there is nothing to report.
+        # (see https://github.com/docker/docker-py/pull/2272)
+ self.client.report_warnings(result, ['Warning'])
+
+ def create_service(self, name, service):
+ service_data = service.build_docker_service()
+ result = self.client.create_service(name=name, **service_data)
+ self.client.report_warnings(result, ['Warning'])
+
+ def remove_service(self, name):
+ self.client.remove_service(name)
+
+ def get_image_digest(self, name, resolve=False):
+        if not name or not resolve:
+            return name
+ repo, tag = parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ name = repo + ':' + tag
+ distribution_data = self.client.inspect_distribution(name)
+ digest = distribution_data['Descriptor']['digest']
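+        # e.g. 'nginx:latest' becomes 'nginx:latest@sha256:<digest>'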
+ return '%s@%s' % (name, digest)
+
+ def get_networks_names_ids(self):
+ return dict(
+ (network['Name'], network['Id']) for network in self.client.networks()
+ )
+
+ def get_missing_secret_ids(self):
+ """
+ Resolve missing secret ids by looking them up by name
+ """
+ secret_names = [
+ secret['secret_name']
+ for secret in self.client.module.params.get('secrets') or []
+ if secret['secret_id'] is None
+ ]
+ if not secret_names:
+ return {}
+ secrets = self.client.secrets(filters={'name': secret_names})
+ secrets = dict(
+ (secret['Spec']['Name'], secret['ID'])
+ for secret in secrets
+ if secret['Spec']['Name'] in secret_names
+ )
+ for secret_name in secret_names:
+ if secret_name not in secrets:
+ self.client.fail(
+ 'Could not find a secret named "%s"' % secret_name
+ )
+ return secrets
+
+ def get_missing_config_ids(self):
+ """
+ Resolve missing config ids by looking them up by name
+ """
+ config_names = [
+ config['config_name']
+ for config in self.client.module.params.get('configs') or []
+ if config['config_id'] is None
+ ]
+ if not config_names:
+ return {}
+ configs = self.client.configs(filters={'name': config_names})
+ configs = dict(
+ (config['Spec']['Name'], config['ID'])
+ for config in configs
+ if config['Spec']['Name'] in config_names
+ )
+ for config_name in config_names:
+ if config_name not in configs:
+ self.client.fail(
+ 'Could not find a config named "%s"' % config_name
+ )
+ return configs
+
+ def run(self):
+ self.diff_tracker = DifferenceTracker()
+ module = self.client.module
+
+ image = module.params['image']
+ try:
+ image_digest = self.get_image_digest(
+ name=image,
+ resolve=module.params['resolve_image']
+ )
+ except DockerException as e:
+ self.client.fail(
+ 'Error looking for an image named %s: %s'
+ % (image, to_native(e))
+ )
+
+ try:
+ current_service = self.get_service(module.params['name'])
+ except Exception as e:
+ self.client.fail(
+ 'Error looking for service named %s: %s'
+ % (module.params['name'], to_native(e))
+ )
+ try:
+ secret_ids = self.get_missing_secret_ids()
+ config_ids = self.get_missing_config_ids()
+ network_ids = self.get_networks_names_ids()
+ new_service = DockerService.from_ansible_params(
+ module.params,
+ current_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ self.client.docker_api_version,
+ self.client.docker_py_version
+ )
+ except Exception as e:
+ return self.client.fail(
+ 'Error parsing module parameters: %s' % to_native(e)
+ )
+
+ changed = False
+ msg = 'noop'
+ rebuilt = False
+ differences = DifferenceTracker()
+ facts = {}
+
+ if current_service:
+ if module.params['state'] == 'absent':
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ msg = 'Service removed'
+ changed = True
+ else:
+ changed, differences, need_rebuild, force_update = new_service.compare(
+ current_service
+ )
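+                # need_rebuild marks differences that cannot be applied through a
+                # service update, so the service is removed and recreated;
+                # force_update restarts tasks even when the spec is unchanged.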
+ if changed:
+ self.diff_tracker.merge(differences)
+ if need_rebuild:
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ self.create_service(
+ module.params['name'],
+ new_service
+ )
+ msg = 'Service rebuilt'
+ rebuilt = True
+ else:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service updated'
+ rebuilt = False
+ else:
+ if force_update:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service forcefully updated'
+ rebuilt = False
+ changed = True
+ else:
+ msg = 'Service unchanged'
+ facts = new_service.get_facts()
+ else:
+ if module.params['state'] == 'absent':
+ msg = 'Service absent'
+ else:
+ if not module.check_mode:
+ self.create_service(module.params['name'], new_service)
+ msg = 'Service created'
+ changed = True
+ facts = new_service.get_facts()
+
+ return msg, changed, rebuilt, differences.get_legacy_docker_diffs(), facts
+
+ def run_safe(self):
+ while True:
+ try:
+ return self.run()
+ except APIError as e:
+ # Sometimes Version.Index will have changed between an inspect and
+ # update. If this is encountered we'll retry the update.
+ if self.retries > 0 and 'update out of sequence' in str(e.explanation):
+ self.retries -= 1
+ time.sleep(1)
+ else:
+ raise
+
+
+def _detect_publish_mode_usage(client):
+ for publish_def in client.module.params['publish'] or []:
+ if publish_def.get('mode'):
+ return True
+ return False
+
+
+def _detect_healthcheck_start_period(client):
+ if client.module.params['healthcheck']:
+ return client.module.params['healthcheck']['start_period'] is not None
+ return False
+
+
+def _detect_mount_tmpfs_usage(client):
+ for mount in client.module.params['mounts'] or []:
+ if mount.get('type') == 'tmpfs':
+ return True
+ if mount.get('tmpfs_size') is not None:
+ return True
+ if mount.get('tmpfs_mode') is not None:
+ return True
+ return False
+
+
+def _detect_update_config_failure_action_rollback(client):
+ rollback_config_failure_action = (
+ (client.module.params['update_config'] or {}).get('failure_action')
+ )
+ return rollback_config_failure_action == 'rollback'
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ image=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ mounts=dict(type='list', elements='dict', options=dict(
+ source=dict(type='str'),
+ target=dict(type='str', required=True),
+ type=dict(
+ type='str',
+ default='bind',
+ choices=['bind', 'volume', 'tmpfs', 'npipe'],
+ ),
+ readonly=dict(type='bool'),
+ labels=dict(type='dict'),
+ propagation=dict(
+ type='str',
+ choices=[
+ 'shared',
+ 'slave',
+ 'private',
+ 'rshared',
+ 'rslave',
+ 'rprivate'
+ ]
+ ),
+ no_copy=dict(type='bool'),
+ driver_config=dict(type='dict', options=dict(
+ name=dict(type='str'),
+ options=dict(type='dict')
+ )),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='int')
+ )),
+ configs=dict(type='list', elements='dict', options=dict(
+ config_id=dict(type='str'),
+ config_name=dict(type='str', required=True),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ secrets=dict(type='list', elements='dict', no_log=False, options=dict(
+ secret_id=dict(type='str', no_log=False),
+ secret_name=dict(type='str', required=True, no_log=False),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ networks=dict(type='list', elements='raw'),
+ command=dict(type='raw'),
+ args=dict(type='list', elements='str'),
+ env=dict(type='raw'),
+ env_files=dict(type='list', elements='path'),
+ force_update=dict(type='bool', default=False),
+ groups=dict(type='list', elements='str'),
+ logging=dict(type='dict', options=dict(
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ )),
+ publish=dict(type='list', elements='dict', options=dict(
+ published_port=dict(type='int', required=False),
+ target_port=dict(type='int', required=True),
+ protocol=dict(type='str', default='tcp', choices=['tcp', 'udp']),
+ mode=dict(type='str', choices=['ingress', 'host']),
+ )),
+ placement=dict(type='dict', options=dict(
+ constraints=dict(type='list', elements='str'),
+ preferences=dict(type='list', elements='dict'),
+ replicas_max_per_node=dict(type='int'),
+ )),
+ tty=dict(type='bool'),
+ dns=dict(type='list', elements='str'),
+ dns_search=dict(type='list', elements='str'),
+ dns_options=dict(type='list', elements='str'),
+ healthcheck=dict(type='dict', options=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ )),
+ hostname=dict(type='str'),
+ hosts=dict(type='dict'),
+ labels=dict(type='dict'),
+ container_labels=dict(type='dict'),
+ mode=dict(
+ type='str',
+ default='replicated',
+ choices=['replicated', 'global']
+ ),
+ replicas=dict(type='int', default=-1),
+ endpoint_mode=dict(type='str', choices=['vip', 'dnsrr']),
+ stop_grace_period=dict(type='str'),
+ stop_signal=dict(type='str'),
+ limits=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ read_only=dict(type='bool'),
+ reservations=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ resolve_image=dict(type='bool', default=False),
+ restart_config=dict(type='dict', options=dict(
+ condition=dict(type='str', choices=['none', 'on-failure', 'any']),
+ delay=dict(type='str'),
+ max_attempts=dict(type='int'),
+ window=dict(type='str'),
+ )),
+ rollback_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ update_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause', 'rollback']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ user=dict(type='str'),
+ working_dir=dict(type='str'),
+ init=dict(type='bool'),
+ cap_add=dict(type='list', elements='str'),
+ cap_drop=dict(type='list', elements='str'),
+ )
+
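+    # Minimal Docker SDK for Python / Docker API versions required per option;
+    # entries with a detect_usage callback are only enforced when the option
+    # is actually used in the task's parameters.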
+ option_minimal_versions = dict(
+ dns=dict(docker_py_version='2.6.0'),
+ dns_options=dict(docker_py_version='2.6.0'),
+ dns_search=dict(docker_py_version='2.6.0'),
+ endpoint_mode=dict(docker_py_version='3.0.0'),
+ force_update=dict(docker_py_version='2.1.0'),
+ healthcheck=dict(docker_py_version='2.6.0'),
+ hostname=dict(docker_py_version='2.2.0'),
+ hosts=dict(docker_py_version='2.6.0'),
+ groups=dict(docker_py_version='2.6.0'),
+ tty=dict(docker_py_version='2.4.0'),
+ secrets=dict(docker_py_version='2.4.0'),
+ configs=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ stop_signal=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ publish=dict(docker_py_version='3.0.0'),
+ read_only=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ resolve_image=dict(docker_api_version='1.30', docker_py_version='3.2.0'),
+ rollback_config=dict(docker_py_version='3.5.0', docker_api_version='1.28'),
+ init=dict(docker_py_version='4.0.0', docker_api_version='1.37'),
+ cap_add=dict(docker_py_version='5.0.3', docker_api_version='1.41'),
+ cap_drop=dict(docker_py_version='5.0.3', docker_api_version='1.41'),
+ # specials
+ publish_mode=dict(
+ docker_py_version='3.0.0',
+ detect_usage=_detect_publish_mode_usage,
+ usage_msg='set publish.mode'
+ ),
+ healthcheck_start_period=dict(
+ docker_py_version='2.6.0',
+ docker_api_version='1.29',
+ detect_usage=_detect_healthcheck_start_period,
+ usage_msg='set healthcheck.start_period'
+ ),
+ update_config_max_failure_ratio=dict(
+ docker_py_version='2.1.0',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'max_failure_ratio'
+ ) is not None,
+ usage_msg='set update_config.max_failure_ratio'
+ ),
+ update_config_failure_action=dict(
+ docker_py_version='3.5.0',
+ docker_api_version='1.28',
+ detect_usage=_detect_update_config_failure_action_rollback,
+ usage_msg='set update_config.failure_action.rollback'
+ ),
+ update_config_monitor=dict(
+ docker_py_version='2.1.0',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'monitor'
+ ) is not None,
+ usage_msg='set update_config.monitor'
+ ),
+ update_config_order=dict(
+ docker_py_version='2.7.0',
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set update_config.order'
+ ),
+ placement_config_preferences=dict(
+ docker_py_version='2.4.0',
+ docker_api_version='1.27',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'preferences'
+ ) is not None,
+ usage_msg='set placement.preferences'
+ ),
+ placement_config_constraints=dict(
+ docker_py_version='2.4.0',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'constraints'
+ ) is not None,
+ usage_msg='set placement.constraints'
+ ),
+ placement_config_replicas_max_per_node=dict(
+ docker_py_version='4.4.3',
+ docker_api_version='1.40',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'replicas_max_per_node'
+ ) is not None,
+ usage_msg='set placement.replicas_max_per_node'
+ ),
+ mounts_tmpfs=dict(
+ docker_py_version='2.6.0',
+ detect_usage=_detect_mount_tmpfs_usage,
+ usage_msg='set mounts.tmpfs'
+ ),
+ rollback_config_order=dict(
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['rollback_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set rollback_config.order'
+ ),
+ )
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_version='2.0.2',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ dsm = DockerServiceManager(client)
+ msg, changed, rebuilt, changes, facts = dsm.run_safe()
+
+ results = dict(
+ msg=msg,
+ changed=changed,
+ rebuilt=rebuilt,
+ changes=changes,
+ swarm_service=facts,
+ )
+ if client.module._diff:
+ before, after = dsm.diff_tracker.get_before_after()
+ results['diff'] = dict(before=before, after=after)
+
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py b/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py
new file mode 100644
index 000000000..ebe8a8e10
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py
@@ -0,0 +1,121 @@
+#!/usr/bin/python
+#
+# Copyright (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service_info
+
+short_description: Retrieves information about docker services from a Swarm Manager
+
+description:
+ - Retrieves information about a docker service.
+ - Essentially returns the output of C(docker service inspect <name>).
+  - Must be executed on a host running as a Swarm Manager; otherwise the module will fail.
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - The name of the service to inspect.
+ type: str
+ required: true
+
+author:
+ - Hannes Ljungberg (@hannseman)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get info from a service
+ community.docker.docker_swarm_service_info:
+ name: myservice
+ register: result
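+
+# A follow-up task (illustrative) showing how the registered result can be used:
+- name: Tell whether the service exists
+  ansible.builtin.debug:
+    msg: "Service myservice {{ 'exists' if result.exists else 'does not exist' }}"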
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the service exists.
+ type: bool
+ returned: always
+ sample: true
+service:
+ description:
+ - A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
+    - Will be C(none) if the service does not exist.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+    # missing Docker SDK for Python is handled in ansible_collections.community.docker.plugins.module_utils.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+
+def get_service_info(client):
+ service = client.module.params['name']
+ return client.get_service_inspect(
+ service_id=service,
+ skip_missing=True
+ )
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.0.0',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ service = get_service_info(client)
+
+ client.module.exit_json(
+ changed=False,
+ service=service,
+ exists=bool(service)
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when Docker SDK for Python tried to talk to the docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_volume.py b/ansible_collections/community/docker/plugins/modules/docker_volume.py
new file mode 100644
index 000000000..09b1d386b
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_volume.py
@@ -0,0 +1,312 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume
+short_description: Manage Docker volumes
+description:
+ - Create/remove Docker volumes.
+ - Performs largely the same function as the C(docker volume) CLI subcommand.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+
+attributes:
+ check_mode:
+ support: full
+ diff_mode:
+ support: full
+
+options:
+ volume_name:
+ description:
+ - Name of the volume to operate on.
+ type: str
+ required: true
+ aliases:
+ - name
+
+ driver:
+ description:
+      - Specify the type of volume. Docker provides the C(local) driver, but third-party drivers can also be used.
+ type: str
+ default: local
+
+ driver_options:
+ description:
+ - "Dictionary of volume settings. Consult docker docs for valid options and values:
+ U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)."
+ type: dict
+ default: {}
+
+ labels:
+ description:
+      - Dictionary of label key/values to set for the volume.
+ type: dict
+
+ recreate:
+ description:
+      - Controls when a volume will be recreated if I(state) is C(present). Please
+        note that recreating an existing volume will cause B(any data in the existing volume
+        to be lost!) The volume will be deleted and a new volume with the same name will be
+        created.
+      - The value C(always) forces the volume to always be recreated.
+      - The value C(never) makes sure the volume will not be recreated.
+      - The value C(options-changed) makes sure the volume will be recreated if the volume
+        already exists and the driver, driver options or labels differ.
+ type: str
+ default: never
+ choices:
+ - always
+ - never
+ - options-changed
+
+ state:
+ description:
+ - C(absent) deletes the volume.
+ - C(present) creates the volume, if it does not already exist.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+author:
+ - Alex Grönholm (@agronholm)
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Create a volume
+ community.docker.docker_volume:
+ name: volume_one
+
+- name: Remove a volume
+ community.docker.docker_volume:
+ name: volume_one
+ state: absent
+
+- name: Create a volume with options
+ community.docker.docker_volume:
+ name: volume_two
+ driver_options:
+ type: btrfs
+ device: /dev/sda2
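+
+# A sketch (volume name and device are illustrative) of recreating a volume when
+# its configuration changed; note that recreation destroys the volume's data.
+- name: Recreate volume if its options changed
+  community.docker.docker_volume:
+    name: volume_two
+    driver_options:
+      type: btrfs
+      device: /dev/sda3
+    recreate: options-changed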
+'''
+
+RETURN = '''
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+from ansible.module_utils.six import iteritems
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DockerBaseClass,
+ DifferenceTracker,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import (
+ APIError,
+ DockerException,
+)
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.volume_name = None
+ self.driver = None
+ self.driver_options = None
+ self.labels = None
+ self.recreate = None
+ self.debug = None
+
+ for key, value in iteritems(client.module.params):
+ setattr(self, key, value)
+
+
+class DockerVolumeManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_volume = self.get_existing_volume()
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_volume(self):
+ try:
+ volumes = self.client.get_json('/volumes')
+ except APIError as e:
+ self.client.fail(to_native(e))
+
+ if volumes[u'Volumes'] is None:
+ return None
+
+ for volume in volumes[u'Volumes']:
+ if volume['Name'] == self.parameters.volume_name:
+ return volume
+
+ return None
+
+ def has_different_config(self):
+ """
+ Return the list of differences between the current parameters and the existing volume.
+
+ :return: list of options that differ
+ """
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
+ differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver'])
+ if self.parameters.driver_options:
+ if not self.existing_volume.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=self.existing_volume.get('Options'))
+ else:
+ for key, value in iteritems(self.parameters.driver_options):
+ if (not self.existing_volume['Options'].get(key) or
+ value != self.existing_volume['Options'][key]):
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=self.existing_volume['Options'].get(key))
+ if self.parameters.labels:
+ existing_labels = self.existing_volume.get('Labels', {})
+ for label in self.parameters.labels:
+ if existing_labels.get(label) != self.parameters.labels.get(label):
+ differences.add('labels.%s' % label,
+ parameter=self.parameters.labels.get(label),
+ active=existing_labels.get(label))
+
+ return differences
+
+ def create_volume(self):
+ if not self.existing_volume:
+ if not self.check_mode:
+ try:
+ data = {
+ 'Name': self.parameters.volume_name,
+ 'Driver': self.parameters.driver,
+ 'DriverOpts': self.parameters.driver_options,
+ }
+ if self.parameters.labels is not None:
+ data['Labels'] = self.parameters.labels
+ resp = self.client.post_json_to_json('/volumes/create', data=data)
+ self.existing_volume = self.client.get_json('/volumes/{0}', resp['Name'])
+ except APIError as e:
+ self.client.fail(to_native(e))
+
+ self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_volume(self):
+ if self.existing_volume:
+ if not self.check_mode:
+ try:
+ self.client.delete_call('/volumes/{0}', self.parameters.volume_name)
+ except APIError as e:
+ self.client.fail(to_native(e))
+
+ self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
+ self.results['changed'] = True
+
+ def present(self):
+ differences = DifferenceTracker()
+ if self.existing_volume:
+ differences = self.has_different_config()
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None)
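+        # Recreate (remove, then create) when forced via 'always', or when the
+        # configuration differs and recreate is set to 'options-changed'; this
+        # destroys any data in the existing volume.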
+ if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always':
+ self.remove_volume()
+ self.existing_volume = None
+
+ self.create_volume()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ volume_facts = self.get_existing_volume()
+ self.results['volume'] = volume_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None)
+ self.remove_volume()
+
+
+def main():
+ argument_spec = dict(
+ volume_name=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='local'),
+ driver_options=dict(type='dict', default={}),
+ labels=dict(type='dict'),
+ recreate=dict(type='str', default='never', choices=['always', 'never', 'options-changed']),
+ debug=dict(type='bool', default=False)
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ # "The docker server >= 1.9.0"
+ )
+
+ try:
+ cm = DockerVolumeManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/modules/docker_volume_info.py b/ansible_collections/community/docker/plugins/modules/docker_volume_info.py
new file mode 100644
index 000000000..100010ba4
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/modules/docker_volume_info.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume_info
+short_description: Retrieve facts about Docker volumes
+description:
+ - Performs largely the same function as the C(docker volume inspect) CLI subcommand.
+
+extends_documentation_fragment:
+ - community.docker.docker.api_documentation
+ - community.docker.attributes
+ - community.docker.attributes.actiongroup_docker
+ - community.docker.attributes.info_module
+
+options:
+ name:
+ description:
+ - Name of the volume to inspect.
+ type: str
+ required: true
+ aliases:
+ - volume_name
+
+author:
+ - Felix Fontein (@felixfontein)
+
+requirements:
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Get info on volume
+ community.docker.docker_volume_info:
+ name: mydata
+ register: result
+
+- name: Does volume exist?
+ ansible.builtin.debug:
+ msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about volume
+ ansible.builtin.debug:
+ var: result.volume
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the volume exists.
+ type: bool
+ returned: always
+ sample: true
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+    - Will be C(none) if the volume does not exist.
+ returned: success
+ type: dict
+ sample: '{
+ "CreatedAt": "2018-12-09T17:43:44+01:00",
+ "Driver": "local",
+ "Labels": null,
+ "Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data",
+ "Name": "ansible-test-bd3f6172",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+from ansible.module_utils.common.text.converters import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClient,
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, NotFound
+
+
+def get_existing_volume(client, volume_name):
+ try:
+ return client.get_json('/volumes/{0}', volume_name)
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ client.fail("Error inspecting volume: %s" % to_native(exc))
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['volume_name']),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ )
+
+ try:
+ volume = get_existing_volume(client, client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+            exists=bool(volume),
+ volume=volume,
+ )
+ except DockerException as e:
+ client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail(
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
+ exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/ansible_collections/community/docker/plugins/plugin_utils/common.py b/ansible_collections/community/docker/plugins/plugin_utils/common.py
new file mode 100644
index 000000000..f3c7c05c7
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/plugin_utils/common.py
@@ -0,0 +1,41 @@
+# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.utils.display import Display
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClientBase,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DOCKER_COMMON_ARGS,
+)
+
+
+class AnsibleDockerClient(AnsibleDockerClientBase):
+ def __init__(self, plugin, min_docker_version=None, min_docker_api_version=None):
+ self.plugin = plugin
+ self.display = Display()
+ super(AnsibleDockerClient, self).__init__(
+ min_docker_version=min_docker_version,
+ min_docker_api_version=min_docker_api_version)
+
+ def fail(self, msg, **kwargs):
+ if kwargs:
+ msg += '\nContext:\n' + '\n'.join(' {0} = {1!r}'.format(k, v) for (k, v) in kwargs.items())
+ raise AnsibleConnectionFailure(msg)
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ self.display.deprecated(msg, version=version, date=date, collection_name=collection_name)
+
+ def _get_params(self):
+ return dict([
+ (option, self.plugin.get_option(option))
+ for option in DOCKER_COMMON_ARGS
+ ])
diff --git a/ansible_collections/community/docker/plugins/plugin_utils/common_api.py b/ansible_collections/community/docker/plugins/plugin_utils/common_api.py
new file mode 100644
index 000000000..eda28d481
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/plugin_utils/common_api.py
@@ -0,0 +1,40 @@
+# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.errors import AnsibleConnectionFailure
+from ansible.utils.display import Display
+
+from ansible_collections.community.docker.plugins.module_utils.common_api import (
+ AnsibleDockerClientBase,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.util import (
+ DOCKER_COMMON_ARGS,
+)
+
+
+class AnsibleDockerClient(AnsibleDockerClientBase):
+ def __init__(self, plugin, min_docker_api_version=None):
+ self.plugin = plugin
+ self.display = Display()
+ super(AnsibleDockerClient, self).__init__(
+ min_docker_api_version=min_docker_api_version)
+
+ def fail(self, msg, **kwargs):
+ if kwargs:
+ msg += '\nContext:\n' + '\n'.join(' {0} = {1!r}'.format(k, v) for (k, v) in kwargs.items())
+ raise AnsibleConnectionFailure(msg)
+
+ def deprecate(self, msg, version=None, date=None, collection_name=None):
+ self.display.deprecated(msg, version=version, date=date, collection_name=collection_name)
+
+ def _get_params(self):
+ return dict([
+ (option, self.plugin.get_option(option))
+ for option in DOCKER_COMMON_ARGS
+ ])
diff --git a/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py b/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py
new file mode 100644
index 000000000..204996f24
--- /dev/null
+++ b/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py
@@ -0,0 +1,18 @@
+# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt)
+# SPDX-License-Identifier: GPL-3.0-or-later
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.compat import selectors
+
+from ansible_collections.community.docker.plugins.module_utils.socket_handler import (
+ DockerSocketHandlerBase,
+)
+
+
+class DockerSocketHandler(DockerSocketHandlerBase):
+ def __init__(self, display, sock, log=None, container=None):
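+        # Route low-level socket log messages through Ansible's Display at
+        # verbosity -vvvv, attributed to the given container.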
+ super(DockerSocketHandler, self).__init__(sock, selectors, log=lambda msg: display.vvvv(msg, host=container))