author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-14 20:03:01 +0000
commit    a453ac31f3428614cceb99027f8efbdb9258a40b (patch)
tree      f61f87408f32a8511cbd91799f9cececb53e0374 /collections-debian-merged/ansible_collections/community/docker/plugins
parent    Initial commit. (diff)
Adding upstream version 2.10.7+merged+base+2.10.8+dfsg.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'collections-debian-merged/ansible_collections/community/docker/plugins')
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker.py  370
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker_api.py  386
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/doc_fragments/docker.py  142
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_containers.py  321
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_machine.py  271
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_swarm.py  254
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/__init__.py  0
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/common.py  1022
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/swarm.py  280
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/current_container_facts.py  100
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_compose.py  1148
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_config.py  299
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container.py  3591
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container_info.py  145
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_host_info.py  343
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image.py  854
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image_info.py  267
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_login.py  486
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network.py  672
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network_info.py  141
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node.py  294
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node_info.py  156
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_prune.py  265
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_secret.py  302
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack.py  308
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_info.py  83
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py  94
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm.py  662
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py  384
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py  2800
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py  115
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume.py  308
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume_info.py  128
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/common.py  33
-rw-r--r--  collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py  221
35 files changed, 17245 insertions, 0 deletions
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker.py b/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker.py
new file mode 100644
index 00000000..71e00761
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker.py
@@ -0,0 +1,370 @@
+# Based on the chroot connection plugin by Maykel Moya
+#
+# (c) 2014, Lorin Hochstein
+# (c) 2015, Leendert Brouwer (https://github.com/objectified)
+# (c) 2015, Toshio Kuratomi <tkuratomi@ansible.com>
+# Copyright (c) 2017 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ author:
+ - Lorin Hochstein (!UNKNOWN)
+ - Leendert Brouwer (!UNKNOWN)
+ name: docker
+ short_description: Run tasks in docker containers
+ description:
+ - Run commands or put/fetch files to an existing docker container.
+ - Uses the Docker CLI to execute commands in the container. If you prefer
+ to directly connect to the Docker daemon, use the
+ R(community.docker.docker_api,ansible_collections.community.docker.docker_api_connection)
+ connection plugin.
+ options:
+ remote_user:
+ description:
+ - The user to execute as inside the container.
+ vars:
+ - name: ansible_user
+ - name: ansible_docker_user
+ docker_extra_args:
+ description:
+ - Extra arguments to pass to the docker command line.
+ default: ''
+ remote_addr:
+ description:
+ - The name of the container you want to access.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_docker_host
+'''
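+
+# Example (illustrative, not part of the plugin): the variables documented above
+# let a play target a container directly, e.g. with an inventory line like
+#
+#   mycontainer ansible_connection=community.docker.docker ansible_docker_user=root
+#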
+
+import distutils.spawn
+import fcntl
+import os
+import os.path
+import subprocess
+import re
+
+from distutils.version import LooseVersion
+
+import ansible.constants as C
+from ansible.compat import selectors
+from ansible.errors import AnsibleError, AnsibleFileNotFound
+from ansible.module_utils.six.moves import shlex_quote
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase, BUFSIZE
+from ansible.utils.display import Display
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local docker based connections '''
+
+ transport = 'community.docker.docker'
+ has_pipelining = True
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ # Note: docker supports running as non-root in some configurations.
+ # (For instance, setting the UNIX socket file to be readable and
+ # writable by a specific UNIX group and then putting users into that
+ # group). Therefore we don't check that the user is root when using
+ # this connection. But if the user is getting a permission denied
+ # error it probably means that docker on their system is only
+ # configured to be connected to by root and they are not running as
+ # root.
+
+ # Windows uses Powershell modules
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+
+ if 'docker_command' in kwargs:
+ self.docker_cmd = kwargs['docker_command']
+ else:
+ self.docker_cmd = distutils.spawn.find_executable('docker')
+ if not self.docker_cmd:
+ raise AnsibleError("docker command not found in PATH")
+
+ docker_version = self._get_docker_version()
+ if docker_version == u'dev':
+ display.warning(u'Docker version number is "dev". Will assume latest version.')
+ if docker_version != u'dev' and LooseVersion(docker_version) < LooseVersion(u'1.3'):
+ raise AnsibleError('docker connection type requires docker 1.3 or higher')
+
+ # The remote user we will request from docker (if supported)
+ self.remote_user = None
+ # The actual user which will execute commands in docker (if known)
+ self.actual_user = None
+
+ if self._play_context.remote_user is not None:
+ if docker_version == u'dev' or LooseVersion(docker_version) >= LooseVersion(u'1.7'):
+ # Support for specifying the exec user was added in docker 1.7
+ self.remote_user = self._play_context.remote_user
+ self.actual_user = self.remote_user
+ else:
+ self.actual_user = self._get_docker_remote_user()
+
+ if self.actual_user != self._play_context.remote_user:
+ display.warning(u'docker {0} does not support remote_user, using container default: {1}'
+ .format(docker_version, self.actual_user or u'?'))
+ elif self._display.verbosity > 2:
+ # Since we're not setting the actual_user, look it up so we have it for logging later
+ # Only do this if display verbosity is high enough that we'll need the value
+ # This saves overhead from calling into docker when we don't need to
+ self.actual_user = self._get_docker_remote_user()
+
+ @staticmethod
+ def _sanitize_version(version):
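+ # e.g. u"'v1.13.1'\n" (as printed by 'docker version --format') becomes u'1.13.1'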
+ version = re.sub(u'[^0-9a-zA-Z.]', u'', version)
+ version = re.sub(u'^v', u'', version)
+ return version
+
+ def _old_docker_version(self):
+ cmd_args = []
+ if self._play_context.docker_extra_args:
+ cmd_args += self._play_context.docker_extra_args.split(' ')
+
+ old_version_subcommand = ['version']
+
+ old_docker_cmd = [self.docker_cmd] + cmd_args + old_version_subcommand
+ p = subprocess.Popen(old_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd_output, err = p.communicate()
+
+ return old_docker_cmd, to_native(cmd_output), err, p.returncode
+
+ def _new_docker_version(self):
+ # no result yet, must be newer Docker version
+ cmd_args = []
+ if self._play_context.docker_extra_args:
+ cmd_args += self._play_context.docker_extra_args.split(' ')
+
+ new_version_subcommand = ['version', '--format', "'{{.Server.Version}}'"]
+
+ new_docker_cmd = [self.docker_cmd] + cmd_args + new_version_subcommand
+ p = subprocess.Popen(new_docker_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ cmd_output, err = p.communicate()
+ return new_docker_cmd, to_native(cmd_output), err, p.returncode
+
+ def _get_docker_version(self):
+
+ cmd, cmd_output, err, returncode = self._old_docker_version()
+ if returncode == 0:
+ for line in to_text(cmd_output, errors='surrogate_or_strict').split(u'\n'):
+ if line.startswith(u'Server version:'): # old docker versions
+ return self._sanitize_version(line.split()[2])
+
+ cmd, cmd_output, err, returncode = self._new_docker_version()
+ if returncode:
+ raise AnsibleError('Docker version check (%s) failed: %s' % (to_native(cmd), to_native(err)))
+
+ return self._sanitize_version(to_text(cmd_output, errors='surrogate_or_strict'))
+
+ def _get_docker_remote_user(self):
+ """ Get the default user configured in the docker container """
+ p = subprocess.Popen([self.docker_cmd, 'inspect', '--format', '{{.Config.User}}', self._play_context.remote_addr],
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+
+ out, err = p.communicate()
+ out = to_text(out, errors='surrogate_or_strict')
+
+ if p.returncode != 0:
+ display.warning(u'unable to retrieve default user from docker container: %s %s' % (out, to_text(err)))
+ return None
+
+ # The default exec user is root, unless it was changed in the Dockerfile with USER
+ return out.strip() or u'root'
+
+ def _build_exec_cmd(self, cmd):
+ """ Build the local docker exec command to run cmd on remote_host
+
+ If remote_user is available and is supported by the docker
+ version we are using, it will be provided to docker exec.
+ """
+
+ local_cmd = [self.docker_cmd]
+
+ if self._play_context.docker_extra_args:
+ local_cmd += self._play_context.docker_extra_args.split(' ')
+
+ local_cmd += [b'exec']
+
+ if self.remote_user is not None:
+ local_cmd += [b'-u', self.remote_user]
+
+ # -i is needed to keep stdin open which allows pipelining to work
+ local_cmd += [b'-i', self._play_context.remote_addr] + cmd
+
+ return local_cmd
+
+ def _connect(self, port=None):
+ """ Connect to the container. Nothing to do """
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
+ self.actual_user or u'?'), host=self._play_context.remote_addr
+ )
+ self._connected = True
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ Run a command on the docker host """
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ local_cmd = self._build_exec_cmd([self._play_context.executable, '-c', cmd])
+
+ display.vvv(u"EXEC {0}".format(to_text(local_cmd)), host=self._play_context.remote_addr)
+ display.debug("opening command with Popen()")
+
+ local_cmd = [to_bytes(i, errors='surrogate_or_strict') for i in local_cmd]
+
+ p = subprocess.Popen(
+ local_cmd,
+ stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ )
+ display.debug("done running command with Popen()")
+
+ if self.become and self.become.expect_prompt() and sudoable:
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) | os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) | os.O_NONBLOCK)
+ selector = selectors.DefaultSelector()
+ selector.register(p.stdout, selectors.EVENT_READ)
+ selector.register(p.stderr, selectors.EVENT_READ)
+
+ become_output = b''
+ try:
+ while not self.become.check_success(become_output) and not self.become.check_password_prompt(become_output):
+ events = selector.select(self._play_context.timeout)
+ if not events:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output))
+
+ for key, event in events:
+ if key.fileobj == p.stdout:
+ chunk = p.stdout.read()
+ elif key.fileobj == p.stderr:
+ chunk = p.stderr.read()
+
+ if not chunk:
+ stdout, stderr = p.communicate()
+ raise AnsibleError('privilege output closed while waiting for password prompt:\n' + to_native(become_output))
+ become_output += chunk
+ finally:
+ selector.close()
+
+ if not self.become.check_success(become_output):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ p.stdin.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+ fcntl.fcntl(p.stdout, fcntl.F_SETFL, fcntl.fcntl(p.stdout, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+ fcntl.fcntl(p.stderr, fcntl.F_SETFL, fcntl.fcntl(p.stderr, fcntl.F_GETFL) & ~os.O_NONBLOCK)
+
+ display.debug("getting output with communicate()")
+ stdout, stderr = p.communicate(in_data)
+ display.debug("done communicating")
+
+ display.debug("done with docker.exec_command()")
+ return (p.returncode, stdout, stderr)
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
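+ # For example, a relative 'tmp/foo.txt' becomes '/tmp/foo.txt' on POSIX targets.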
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ return ntpath.normpath(remote_path)
+ else:
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ """ Transfer a file from local to docker container """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ out_path = self._prefix_login_path(out_path)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound(
+ "file or module does not exist: %s" % to_native(in_path))
+
+ out_path = shlex_quote(out_path)
+ # Older docker doesn't have native support for copying files into
+ # running containers, so we use docker exec to implement this
+ # Although docker version 1.8 and later provide support, the
+ # owner and group of the files are always set to root
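+ # Roughly equivalent to:
+ # docker exec -i <container> /bin/sh -c 'dd of=<out_path> bs=<BUFSIZE>' < <in_path>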
+ with open(to_bytes(in_path, errors='surrogate_or_strict'), 'rb') as in_file:
+ if not os.fstat(in_file.fileno()).st_size:
+ count = ' count=0'
+ else:
+ count = ''
+ args = self._build_exec_cmd([self._play_context.executable, "-c", "dd of=%s bs=%s%s" % (out_path, BUFSIZE, count)])
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ try:
+ p = subprocess.Popen(args, stdin=in_file,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ except OSError:
+ raise AnsibleError("docker connection requires dd command in the container to put files")
+ stdout, stderr = p.communicate()
+
+ if p.returncode != 0:
+ raise AnsibleError("failed to transfer file %s to %s:\n%s\n%s" %
+ (to_native(in_path), to_native(out_path), to_native(stdout), to_native(stderr)))
+
+ def fetch_file(self, in_path, out_path):
+ """ Fetch a file from container to local. """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ in_path = self._prefix_login_path(in_path)
+ # out_path is the final file path, but docker takes a directory, not a
+ # file path
+ out_dir = os.path.dirname(out_path)
+
+ args = [self.docker_cmd, "cp", "%s:%s" % (self._play_context.remote_addr, in_path), out_dir]
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+
+ p = subprocess.Popen(args, stdin=subprocess.PIPE,
+ stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ p.communicate()
+
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ actual_out_path = ntpath.join(out_dir, ntpath.basename(in_path))
+ else:
+ actual_out_path = os.path.join(out_dir, os.path.basename(in_path))
+
+ if p.returncode != 0:
+ # Older docker versions don't have native support for fetching files with
+ # the `cp` command. If `cp` fails, fall back to `dd` instead.
+ args = self._build_exec_cmd([self._play_context.executable, "-c", "dd if=%s bs=%s" % (in_path, BUFSIZE)])
+ args = [to_bytes(i, errors='surrogate_or_strict') for i in args]
+ with open(to_bytes(actual_out_path, errors='surrogate_or_strict'), 'wb') as out_file:
+ try:
+ p = subprocess.Popen(args, stdin=subprocess.PIPE,
+ stdout=out_file, stderr=subprocess.PIPE)
+ except OSError:
+ raise AnsibleError("docker connection requires dd command in the container to put files")
+ stdout, stderr = p.communicate()
+
+ if p.returncode != 0:
+ raise AnsibleError("failed to fetch file %s to %s:\n%s\n%s" % (in_path, out_path, stdout, stderr))
+
+ # Rename if needed
+ if actual_out_path != out_path:
+ os.rename(to_bytes(actual_out_path, errors='strict'), to_bytes(out_path, errors='strict'))
+
+ def close(self):
+ """ Terminate the connection. Nothing to do for Docker"""
+ super(Connection, self).close()
+ self._connected = False
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker_api.py b/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker_api.py
new file mode 100644
index 00000000..d1cccf81
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/connection/docker_api.py
@@ -0,0 +1,386 @@
+# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+author:
+ - Felix Fontein (@felixfontein)
+name: docker_api
+short_description: Run tasks in docker containers
+version_added: 1.1.0
+description:
+ - Run commands or put/fetch files to an existing docker container.
+ - Uses Docker SDK for Python to interact directly with the Docker daemon instead of
+ using the Docker CLI. Use the
+ R(community.docker.docker,ansible_collections.community.docker.docker_connection)
+ connection plugin if you want to use the Docker CLI.
+options:
+ remote_user:
+ type: str
+ description:
+ - The user to execute as inside the container.
+ vars:
+ - name: ansible_user
+ - name: ansible_docker_user
+ remote_addr:
+ type: str
+ description:
+ - The name of the container you want to access.
+ default: inventory_hostname
+ vars:
+ - name: ansible_host
+ - name: ansible_docker_host
+
+ # The following are options from the docs fragment. We want to allow the user to
+ # specify them with Ansible variables.
+ docker_host:
+ vars:
+ - name: ansible_docker_docker_host
+ tls_hostname:
+ vars:
+ - name: ansible_docker_tls_hostname
+ api_version:
+ vars:
+ - name: ansible_docker_api_version
+ timeout:
+ vars:
+ - name: ansible_docker_timeout
+ ca_cert:
+ vars:
+ - name: ansible_docker_ca_cert
+ client_cert:
+ vars:
+ - name: ansible_docker_client_cert
+ client_key:
+ vars:
+ - name: ansible_docker_client_key
+ ssl_version:
+ vars:
+ - name: ansible_docker_ssl_version
+ tls:
+ vars:
+ - name: ansible_docker_tls
+ validate_certs:
+ vars:
+ - name: ansible_docker_validate_certs
+
+extends_documentation_fragment:
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+'''
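+
+# Example (illustrative, not part of the plugin): the connection can be selected
+# and configured entirely through inventory variables, e.g.
+#
+#   mycontainer ansible_connection=community.docker.docker_api ansible_docker_docker_host=tcp://192.0.2.10:2376
+#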
+
+import io
+import os
+import os.path
+import shutil
+import tarfile
+
+from ansible.errors import AnsibleFileNotFound, AnsibleConnectionFailure
+from ansible.module_utils._text import to_bytes, to_native, to_text
+from ansible.plugins.connection import ConnectionBase
+from ansible.utils.display import Display
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.plugin_utils.socket_handler import (
+ DockerSocketHandler,
+)
+from ansible_collections.community.docker.plugins.plugin_utils.common import (
+ AnsibleDockerClient,
+)
+
+try:
+ from docker.errors import DockerException, APIError, NotFound
+except Exception:
+ # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
+ pass
+
+MIN_DOCKER_PY = '1.7.0'
+MIN_DOCKER_API = None
+
+
+display = Display()
+
+
+class Connection(ConnectionBase):
+ ''' Local docker based connections '''
+
+ transport = 'community.docker.docker_api'
+ has_pipelining = True
+
+ def _call_client(self, play_context, callable, not_found_can_be_resource=False):
+ try:
+ return callable()
+ except NotFound as e:
+ if not_found_can_be_resource:
+ raise AnsibleConnectionFailure('Could not find container "{1}" or resource in it ({0})'.format(e, play_context.remote_addr))
+ else:
+ raise AnsibleConnectionFailure('Could not find container "{1}" ({0})'.format(e, play_context.remote_addr))
+ except APIError as e:
+ if e.response and e.response.status_code == 409:
+ raise AnsibleConnectionFailure('The container "{1}" has been paused ({0})'.format(e, play_context.remote_addr))
+ self.client.fail(
+ 'An unexpected docker error occurred for container "{1}": {0}'.format(e, play_context.remote_addr)
+ )
+ except DockerException as e:
+ self.client.fail(
+ 'An unexpected docker error occurred for container "{1}": {0}'.format(e, play_context.remote_addr)
+ )
+ except RequestException as e:
+ self.client.fail(
+ 'An unexpected requests error occurred for container "{1}" when docker-py tried to talk to the docker daemon: {0}'
+ .format(e, play_context.remote_addr)
+ )
+
+ def __init__(self, play_context, new_stdin, *args, **kwargs):
+ super(Connection, self).__init__(play_context, new_stdin, *args, **kwargs)
+
+ self.client = None
+ self.ids = dict()
+
+ # Windows uses Powershell modules
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ self.module_implementation_preferences = ('.ps1', '.exe', '')
+
+ self.actual_user = play_context.remote_user
+
+ def _connect(self, port=None):
+ """ Connect to the container. Nothing to do """
+ super(Connection, self)._connect()
+ if not self._connected:
+ display.vvv(u"ESTABLISH DOCKER CONNECTION FOR USER: {0}".format(
+ self.actual_user or u'?'), host=self._play_context.remote_addr
+ )
+ if self.client is None:
+ self.client = AnsibleDockerClient(self, min_docker_version=MIN_DOCKER_PY, min_docker_api_version=MIN_DOCKER_API)
+ self._connected = True
+
+ if self.actual_user is None and display.verbosity > 2:
+ # Since we're not setting the actual_user, look it up so we have it for logging later
+ # Only do this if display verbosity is high enough that we'll need the value
+ # This saves overhead from calling into docker when we don't need to
+ display.vvv(u"Trying to determine actual user")
+ result = self._call_client(self._play_context, lambda: self.client.inspect_container(self._play_context.remote_addr))
+ if result.get('Config'):
+ self.actual_user = result['Config'].get('User')
+ if self.actual_user is not None:
+ display.vvv(u"Actual user is '{0}'".format(self.actual_user))
+
+ def exec_command(self, cmd, in_data=None, sudoable=False):
+ """ Run a command on the docker host """
+
+ super(Connection, self).exec_command(cmd, in_data=in_data, sudoable=sudoable)
+
+ command = [self._play_context.executable, '-c', to_text(cmd)]
+
+ do_become = self.become and self.become.expect_prompt() and sudoable
+
+ display.vvv(
+ u"EXEC {0}{1}{2}".format(
+ to_text(command),
+ ', with stdin ({0} bytes)'.format(len(in_data)) if in_data is not None else '',
+ ', with become prompt' if do_become else '',
+ ),
+ host=self._play_context.remote_addr
+ )
+
+ need_stdin = in_data is not None or do_become
+
+ exec_data = self._call_client(self._play_context, lambda: self.client.exec_create(
+ self._play_context.remote_addr,
+ command,
+ stdout=True,
+ stderr=True,
+ stdin=need_stdin,
+ user=self._play_context.remote_user or '',
+ workdir=None,
+ ))
+ exec_id = exec_data['Id']
+
+ if need_stdin:
+ exec_socket = self._call_client(self._play_context, lambda: self.client.exec_start(
+ exec_id,
+ detach=False,
+ socket=True,
+ ))
+ try:
+ with DockerSocketHandler(display, exec_socket, container=self._play_context.remote_addr) as exec_socket_handler:
+ if do_become:
+ become_output = [b'']
+
+ def append_become_output(stream_id, data):
+ become_output[0] += data
+
+ exec_socket_handler.set_block_done_callback(append_become_output)
+
+ while not self.become.check_success(become_output[0]) and not self.become.check_password_prompt(become_output[0]):
+ if not exec_socket_handler.select(self._play_context.timeout):
+ stdout, stderr = exec_socket_handler.consume()
+ raise AnsibleConnectionFailure('timeout waiting for privilege escalation password prompt:\n' + to_native(become_output[0]))
+
+ if exec_socket_handler.is_eof():
+ raise AnsibleConnectionFailure('privilege output closed while waiting for password prompt:\n' + to_native(become_output[0]))
+
+ if not self.become.check_success(become_output[0]):
+ become_pass = self.become.get_option('become_pass', playcontext=self._play_context)
+ exec_socket_handler.write(to_bytes(become_pass, errors='surrogate_or_strict') + b'\n')
+
+ if in_data is not None:
+ exec_socket_handler.write(in_data)
+
+ stdout, stderr = exec_socket_handler.consume()
+ finally:
+ exec_socket.close()
+ else:
+ stdout, stderr = self._call_client(self._play_context, lambda: self.client.exec_start(
+ exec_id,
+ detach=False,
+ stream=False,
+ socket=False,
+ demux=True,
+ ))
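+ # demux=True makes exec_start() return a (stdout, stderr) pair instead of a
+ # single multiplexed stream, so no manual demultiplexing is needed here.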
+
+ result = self._call_client(self._play_context, lambda: self.client.exec_inspect(exec_id))
+
+ return result.get('ExitCode') or 0, stdout or b'', stderr or b''
+
+ def _prefix_login_path(self, remote_path):
+ ''' Make sure that we put files into a standard path
+
+ If a path is relative, then we need to choose where to put it.
+ ssh chooses $HOME but we aren't guaranteed that a home dir will
+ exist in any given chroot. So for now we're choosing "/" instead.
+ This also happens to be the former default.
+
+ Can revisit using $HOME instead if it's a problem
+ '''
+ if getattr(self._shell, "_IS_WINDOWS", False):
+ import ntpath
+ return ntpath.normpath(remote_path)
+ else:
+ if not remote_path.startswith(os.path.sep):
+ remote_path = os.path.join(os.path.sep, remote_path)
+ return os.path.normpath(remote_path)
+
+ def put_file(self, in_path, out_path):
+ """ Transfer a file from local to docker container """
+ super(Connection, self).put_file(in_path, out_path)
+ display.vvv("PUT %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ out_path = self._prefix_login_path(out_path)
+ if not os.path.exists(to_bytes(in_path, errors='surrogate_or_strict')):
+ raise AnsibleFileNotFound(
+ "file or module does not exist: %s" % to_native(in_path))
+
+ if self.actual_user not in self.ids:
+ dummy, ids, dummy = self.exec_command(b'id -u && id -g')
+ try:
+ user_id, group_id = ids.splitlines()
+ self.ids[self.actual_user] = int(user_id), int(group_id)
+ display.vvvv(
+ 'PUT: Determined uid={0} and gid={1} for user "{2}"'.format(user_id, group_id, self.actual_user),
+ host=self._play_context.remote_addr
+ )
+ except Exception as e:
+ raise AnsibleConnectionFailure(
+ 'Error while determining user and group ID of current user in container "{1}": {0}\nGot value: {2!r}'
+ .format(e, self._play_context.remote_addr, ids)
+ )
+
+ b_in_path = to_bytes(in_path, errors='surrogate_or_strict')
+
+ out_dir, out_file = os.path.split(out_path)
+
+ # TODO: stream tar file, instead of creating it in-memory into a BytesIO
+
+ bio = io.BytesIO()
+ with tarfile.open(fileobj=bio, mode='w|', dereference=True, encoding='utf-8') as tar:
+ # Note that without both name (bytes) and arcname (unicode), this either fails for
+ # Python 2.6/2.7, Python 3.5/3.6, or Python 3.7+. Only when passing both (in this
+ # form) it works with Python 2.6, 2.7, 3.5, 3.6, and 3.7 up to 3.9.
+ tarinfo = tar.gettarinfo(b_in_path, arcname=to_text(out_file))
+ user_id, group_id = self.ids[self.actual_user]
+ tarinfo.uid = user_id
+ tarinfo.uname = ''
+ if self.actual_user:
+ tarinfo.uname = self.actual_user
+ tarinfo.gid = group_id
+ tarinfo.gname = ''
+ tarinfo.mode &= 0o700
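+ # Keep only the owner permission bits; the uid/gid set above make the remote
+ # user the owner, so group/world bits are unnecessary.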
+ with open(b_in_path, 'rb') as f:
+ tar.addfile(tarinfo, fileobj=f)
+ data = bio.getvalue()
+
+ ok = self._call_client(self._play_context, lambda: self.client.put_archive(
+ self._play_context.remote_addr,
+ out_dir,
+ data, # can also be file object for streaming; this is only clear from the
+ # implementation of put_archive(), which uses requests's put().
+ # See https://2.python-requests.org/en/master/user/advanced/#streaming-uploads
+ # WARNING: might not work with all transports!
+ ), not_found_can_be_resource=True)
+ if not ok:
+ raise AnsibleConnectionFailure(
+ 'Unknown error while creating file "{0}" in container "{1}".'
+ .format(out_path, self._play_context.remote_addr)
+ )
+
+ def fetch_file(self, in_path, out_path):
+ """ Fetch a file from container to local. """
+ super(Connection, self).fetch_file(in_path, out_path)
+ display.vvv("FETCH %s TO %s" % (in_path, out_path), host=self._play_context.remote_addr)
+
+ in_path = self._prefix_login_path(in_path)
+ b_out_path = to_bytes(out_path, errors='surrogate_or_strict')
+
+ considered_in_paths = set()
+
+ while True:
+ if in_path in considered_in_paths:
+ raise AnsibleConnectionFailure('Found infinite symbolic link loop when trying to fetch "{0}"'.format(in_path))
+ considered_in_paths.add(in_path)
+
+ display.vvvv('FETCH: Fetching "%s"' % in_path, host=self._play_context.remote_addr)
+ stream, stats = self._call_client(self._play_context, lambda: self.client.get_archive(
+ self._play_context.remote_addr,
+ in_path,
+ ), not_found_can_be_resource=True)
+
+ # TODO: stream tar file instead of downloading it into a BytesIO
+
+ bio = io.BytesIO()
+ for chunk in stream:
+ bio.write(chunk)
+ bio.seek(0)
+
+ with tarfile.open(fileobj=bio, mode='r|') as tar:
+ symlink_member = None
+ first = True
+ for member in tar:
+ if not first:
+ raise AnsibleConnectionFailure('Received tarfile contains more than one file!')
+ first = False
+ if member.issym():
+ symlink_member = member
+ continue
+ if not member.isfile():
+ raise AnsibleConnectionFailure('Remote file "%s" is not a regular file or a symbolic link' % in_path)
+ in_f = tar.extractfile(member) # in Python 2, this *cannot* be used in `with`...
+ with open(b_out_path, 'wb') as out_f:
+ shutil.copyfileobj(in_f, out_f, member.size)
+ if first:
+ raise AnsibleConnectionFailure('Received tarfile is empty!')
+ # If the only member was a file, it's already extracted. If it is a symlink, process it now.
+ if symlink_member is not None:
+ in_path = os.path.join(os.path.split(in_path)[0], symlink_member.linkname)
+ display.vvvv('FETCH: Following symbolic link to "%s"' % in_path, host=self._play_context.remote_addr)
+ continue
+ return
+
+ def close(self):
+ """ Terminate the connection. Nothing to do for Docker"""
+ super(Connection, self).close()
+ self._connected = False
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/doc_fragments/docker.py b/collections-debian-merged/ansible_collections/community/docker/plugins/doc_fragments/docker.py
new file mode 100644
index 00000000..f0d06e64
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/doc_fragments/docker.py
@@ -0,0 +1,142 @@
+# -*- coding: utf-8 -*-
+
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+class ModuleDocFragment(object):
+
+ # Docker doc fragment
+ DOCUMENTATION = r'''
+
+options:
+ docker_host:
+ description:
+ - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the
+ TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection,
+ the module will automatically replace C(tcp) in the connection URL with C(https).
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: unix://var/run/docker.sock
+ aliases: [ docker_url ]
+ tls_hostname:
+ description:
+ - When verifying the authenticity of the Docker Host server, provide the expected name of the server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will
+ be used instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: localhost
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: str
+ default: auto
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ ca_cert:
+ description:
+ - Use a CA certificate when performing server verification by providing the path to a CA certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description:
+ - Path to the client's TLS certificate file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ client_key:
+ description:
+ - Path to the client's TLS key file.
+ - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set,
+ the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ssl_version:
+ description:
+ - Provide a valid SSL version number. Default value determined by ssl.py module.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be
+ used instead.
+ type: str
+ tls:
+ description:
+ - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host
+ server. Note that if I(validate_certs) is set to C(yes) as well, it will take precedence.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used
+ instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: no
+ validate_certs:
+ description:
+ - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be
+ used instead. If the environment variable is not set, the default value will be used.
+ type: bool
+ default: no
+ aliases: [ tls_verify ]
+ debug:
+ description:
+ - Debug mode
+ type: bool
+ default: no
+
+notes:
+ - Connect to the Docker daemon by providing parameters with each task or by defining environment variables.
+ You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION),
+ C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped
+ with the product that sets up the environment. It will set these variables for you. See
+ U(https://docs.docker.com/machine/reference/env/) for more details.
+ - When connecting to Docker daemon with TLS, you might need to install additional Python packages.
+ For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip).
+ - Note that the Docker SDK for Python only allows specifying the path to the Docker configuration for very few functions.
+ In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified,
+ and use C($DOCKER_CONFIG/config.json) otherwise.
+'''
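+
+ # For example (illustrative values), the notes above mean the same connection
+ # can be configured through the environment instead of task parameters:
+ #
+ # export DOCKER_HOST=tcp://192.0.2.10:2376
+ # export DOCKER_TLS_VERIFY=1
+ # export DOCKER_CERT_PATH=/somewhere/certs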
+
+ # Additional, more specific stuff for minimal Docker SDK for Python version < 2.0
+
+ DOCKER_PY_1_DOCUMENTATION = r'''
+options: {}
+notes:
+ - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
+ communicate with the Docker daemon.
+requirements:
+ - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+ Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+ (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+ For Python 2.6, C(docker-py) must be used. Otherwise, it is recommended to
+ install the C(docker) Python module. Note that both modules should *not*
+ be installed at the same time. Also note that when both modules are installed
+ and one of them is uninstalled, the other might no longer function and a
+ reinstall of it is required."
+'''
+
+ # Additional, more specific stuff for minimal Docker SDK for Python version >= 2.0.
+ # Note that Docker SDK for Python >= 2.0 requires Python 2.7 or newer.
+
+ DOCKER_PY_2_DOCUMENTATION = r'''
+options: {}
+notes:
+ - This module uses the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to
+ communicate with the Docker daemon.
+requirements:
+ - "Python >= 2.7"
+ - "Docker SDK for Python: Please note that the L(docker-py,https://pypi.org/project/docker-py/)
+ Python module has been superseded by L(docker,https://pypi.org/project/docker/)
+ (see L(here,https://github.com/docker/docker-py/issues/1310) for details).
+ This module does *not* work with docker-py."
+'''
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_containers.py b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_containers.py
new file mode 100644
index 00000000..ef2697a6
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_containers.py
@@ -0,0 +1,321 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2020, Felix Fontein <felix@fontein.de>
+# For the parts taken from the docker inventory script:
+# Copyright (c) 2016, Paul Durivage <paul.durivage@gmail.com>
+# Copyright (c) 2016, Chris Houseknecht <house@redhat.com>
+# Copyright (c) 2016, James Tanner <jtanner@redhat.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+name: docker_containers
+short_description: Ansible dynamic inventory plugin for Docker containers.
+version_added: 1.1.0
+author:
+ - Felix Fontein (@felixfontein)
+requirements:
+ - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
+extends_documentation_fragment:
+ - ansible.builtin.constructed
+ - community.docker.docker
+ - community.docker.docker.docker_py_1_documentation
+description:
+ - Reads inventories from the Docker API.
+ - Uses a YAML configuration file that ends with C(docker.[yml|yaml]).
+options:
+ plugin:
+ description:
+ - The name of this plugin. It should always be set to C(community.docker.docker_containers)
+ for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices: [ community.docker.docker_containers ]
+
+ connection_type:
+ description:
+ - Which connection type to use to connect to the containers.
+ - Default is to use SSH (C(ssh)). For this, the options I(default_ip) and
+ I(private_ssh_port) are used.
+ - Alternatively, C(docker-cli) selects the
+ R(docker connection plugin,ansible_collections.community.docker.docker_connection),
+ and C(docker-api) selects the
+ R(docker_api connection plugin,ansible_collections.community.docker.docker_api_connection).
+ type: str
+ default: docker-api
+ choices:
+ - ssh
+ - docker-cli
+ - docker-api
+
+ verbose_output:
+ description:
+ - Toggle whether to include all available inspection metadata.
+ - Note that all top-level keys will be transformed to the format C(docker_xxx).
+ For example, C(HostConfig) is converted to C(docker_hostconfig).
+ - If this is C(false), these values can only be used during I(constructed), I(groups), and I(keyed_groups).
+ - The C(docker) inventory script always added these variables, so for compatibility set this to C(true).
+ type: bool
+ default: false
+
+ default_ip:
+ description:
+ - The IP address to assign to ansible_host when the container's SSH port is mapped to interface
+ '0.0.0.0'.
+ - Only used if I(connection_type) is C(ssh).
+ type: str
+ default: 127.0.0.1
+
+ private_ssh_port:
+ description:
+ - The port containers use for SSH.
+ - Only used if I(connection_type) is C(ssh).
+ type: int
+ default: 22
+
+ add_legacy_groups:
+ description:
+ - "Add the same groups as the C(docker) inventory script does. These are the following:"
+ - "C(<container id>): contains the container of this ID."
+ - "C(<container name>): contains the container that has this name."
+ - "C(<container short id>): contains the containers that have this short ID (first 13 letters of ID)."
+ - "C(image_<image name>): contains the containers that have the image C(<image name>)."
+ - "C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>)."
+ - "C(service_<service name>): contains the containers that belong to the service C(<service name>)"
+ - "C(<docker_host>): contains the containers which belong to the Docker daemon I(docker_host).
+ Useful if you run this plugin against multiple Docker daemons."
+ - "C(running): contains all containers that are running."
+ - "C(stopped): contains all containers that are not running."
+ - If this is not set to C(true), you should use keyed groups to add the containers to groups.
+ See the examples for how to do that.
+ type: bool
+ default: false
+'''
+
+EXAMPLES = '''
+# Minimal example using local Docker daemon
+plugin: community.docker.docker_containers
+docker_host: unix://var/run/docker.sock
+
+# Minimal example using remote Docker daemon
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2375
+
+# Example using remote Docker daemon with unverified TLS
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2376
+tls: true
+
+# Example using remote Docker daemon with verified TLS and client certificate verification
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2376
+validate_certs: true
+ca_cert: /somewhere/ca.pem
+client_key: /somewhere/key.pem
+client_cert: /somewhere/cert.pem
+
+# Example using constructed features to create groups
+plugin: community.docker.docker_containers
+docker_host: tcp://my-docker-host:2375
+strict: false
+keyed_groups:
+ # Add containers with primary network foo to a network_foo group
+ - prefix: network
+ key: 'docker_hostconfig.NetworkMode'
+ # Add Linux hosts to an os_linux group
+ - prefix: os
+ key: docker_platform
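+
+# Example (hypothetical values) using keyed groups instead of add_legacy_groups
+# to group containers by image, similar to the legacy image_<image name> groups
+plugin: community.docker.docker_containers
+docker_host: unix://var/run/docker.sock
+strict: false
+keyed_groups:
+ # Add containers running image foo to an image_foo group
+ - prefix: image
+ key: 'docker_config.Image'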
+'''
+
+import re
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible_collections.community.docker.plugins.module_utils.common import update_tls_hostname, get_connect_params
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.parsing.utils.addresses import parse_address
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.plugin_utils.common import (
+ AnsibleDockerClient,
+)
+
+try:
+ from docker.errors import DockerException, APIError, NotFound
+except Exception:
+ # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
+ pass
+
+MIN_DOCKER_PY = '1.7.0'
+MIN_DOCKER_API = None
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using Docker daemon as source. '''
+
+ NAME = 'community.docker.docker_containers'
+
+ def _slugify(self, value):
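+ # e.g. 'HostConfig' -> 'docker_hostconfig', 'NetworkSettings' -> 'docker_networksettings'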
+ return 'docker_%s' % (re.sub(r'[^\w-]', '_', value).lower().lstrip('_'))
+
+ def _populate(self, client):
+ strict = self.get_option('strict')
+
+ ssh_port = self.get_option('private_ssh_port')
+ default_ip = self.get_option('default_ip')
+ hostname = self.get_option('docker_host')
+ verbose_output = self.get_option('verbose_output')
+ connection_type = self.get_option('connection_type')
+ add_legacy_groups = self.get_option('add_legacy_groups')
+
+ try:
+ containers = client.containers(all=True)
+ except APIError as exc:
+ raise AnsibleError("Error listing containers: %s" % to_native(exc))
+
+ if add_legacy_groups:
+ self.inventory.add_group('running')
+ self.inventory.add_group('stopped')
+
+ for container in containers:
+ id = container.get('Id')
+ short_id = id[:13]
+
+ try:
+ name = container.get('Names', list())[0].lstrip('/')
+ full_name = name
+ except IndexError:
+ name = short_id
+ full_name = id
+
+ self.inventory.add_host(name)
+ facts = dict(
+ docker_name=name,
+ docker_short_id=short_id
+ )
+ full_facts = dict()
+
+ try:
+ inspect = client.inspect_container(id)
+ except APIError as exc:
+ raise AnsibleError("Error inspecting container %s - %s" % (name, str(exc)))
+
+ state = inspect.get('State') or dict()
+ config = inspect.get('Config') or dict()
+ labels = config.get('Labels') or dict()
+
+ running = state.get('Running')
+
+ # Add container to groups
+ image_name = config.get('Image')
+ if image_name and add_legacy_groups:
+ self.inventory.add_group('image_{0}'.format(image_name))
+ self.inventory.add_host(name, group='image_{0}'.format(image_name))
+
+ stack_name = labels.get('com.docker.stack.namespace')
+ if stack_name:
+ full_facts['docker_stack'] = stack_name
+ if add_legacy_groups:
+ self.inventory.add_group('stack_{0}'.format(stack_name))
+ self.inventory.add_host(name, group='stack_{0}'.format(stack_name))
+
+ service_name = labels.get('com.docker.swarm.service.name')
+ if service_name:
+ full_facts['docker_service'] = service_name
+ if add_legacy_groups:
+ self.inventory.add_group('service_{0}'.format(service_name))
+ self.inventory.add_host(name, group='service_{0}'.format(service_name))
+
+ if connection_type == 'ssh':
+ # Figure out ssh IP and Port
+ try:
+ # Lookup the public facing port Nat'ed to ssh port.
+ port = client.port(container, ssh_port)[0]
+ except (IndexError, AttributeError, TypeError):
+ port = dict()
+
+ try:
+ ip = default_ip if port['HostIp'] == '0.0.0.0' else port['HostIp']
+ except KeyError:
+ ip = ''
+
+ facts.update(dict(
+ ansible_ssh_host=ip,
+ ansible_ssh_port=port.get('HostPort', 0),
+ ))
+ elif connection_type == 'docker-cli':
+ facts.update(dict(
+ ansible_host=full_name,
+ ansible_connection='community.docker.docker',
+ ))
+ elif connection_type == 'docker-api':
+ facts.update(dict(
+ ansible_host=full_name,
+ ansible_connection='community.docker.docker_api',
+ ))
+
+ full_facts.update(facts)
+ for key, value in inspect.items():
+ fact_key = self._slugify(key)
+ full_facts[fact_key] = value
+
+ if verbose_output:
+ facts.update(full_facts)
+
+ for key, value in facts.items():
+ self.inventory.set_variable(name, key, value)
+
+ # Use constructed if applicable
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), full_facts, name, strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), full_facts, name, strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), full_facts, name, strict=strict)
+
+ # We need to do this last since we also add a group called `name`.
+ # When we do this before a set_variable() call, the variables are assigned
+ # to the group, and not to the host.
+ if add_legacy_groups:
+ self.inventory.add_group(id)
+ self.inventory.add_host(name, group=id)
+ self.inventory.add_group(name)
+ self.inventory.add_host(name, group=name)
+ self.inventory.add_group(short_id)
+ self.inventory.add_host(name, group=short_id)
+ self.inventory.add_group(hostname)
+ self.inventory.add_host(name, group=hostname)
+
+ if running is True:
+ self.inventory.add_host(name, group='running')
+ else:
+ self.inventory.add_host(name, group='stopped')
+
+ def verify_file(self, path):
+ """Return the possibly of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker.yaml', 'docker.yml')))
+
+ def _create_client(self):
+ return AnsibleDockerClient(self, min_docker_version=MIN_DOCKER_PY, min_docker_api_version=MIN_DOCKER_API)
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ client = self._create_client()
+ try:
+ self._populate(client)
+ except DockerException as e:
+ raise AnsibleError(
+ 'An unexpected docker error occurred: {0}'.format(e)
+ )
+ except RequestException as e:
+ raise AnsibleError(
+ 'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e)
+ )
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_machine.py b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_machine.py
new file mode 100644
index 00000000..7133ba96
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_machine.py
@@ -0,0 +1,271 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2019, Ximon Eighteen <ximon.eighteen@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_machine
+ author: Ximon Eighteen (@ximon18)
+ short_description: Docker Machine inventory source
+ requirements:
+ - L(Docker Machine,https://docs.docker.com/machine/)
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Get inventory hosts from Docker Machine.
+ - Uses a YAML configuration file that ends with docker_machine.(yml|yaml).
+ - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key).
+ - The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables.
+
+ options:
+ plugin:
+ description: token that ensures this is a source file for the C(docker_machine) plugin.
+ required: yes
+ choices: ['docker_machine', 'community.docker.docker_machine']
+ daemon_env:
+ description:
+ - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched.
+ - With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched.
+ A warning will be issued for any skipped host if the choice is C(require).
+ - With C(optional) and C(optional-silently), fetch them but do not skip hosts for which they cannot be fetched.
+ A warning will be issued for hosts where they cannot be fetched if the choice is C(optional).
+ - With C(skip), do not attempt to fetch the docker daemon connection environment variables.
+ - If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables.
+ type: str
+ choices:
+ - require
+ - require-silently
+ - optional
+ - optional-silently
+ - skip
+ default: require
+ running_required:
+ description: when true, hosts which Docker Machine indicates are in a state other than C(running) will be skipped.
+ type: bool
+ default: yes
+ verbose_output:
+ description: when true, include all available nodes metadata (e.g. Image, Region, Size) as a JSON object named C(docker_machine_node_attributes).
+ type: bool
+ default: yes
+'''
+
+EXAMPLES = '''
+# Minimal example
+plugin: community.docker.docker_machine
+
+# Example using constructed features to create a group per Docker Machine driver
+# (https://docs.docker.com/machine/drivers/), e.g.:
+# $ docker-machine create --driver digitalocean ... mymachine
+# $ ansible-inventory -i ./path/to/docker-machine.yml --host=mymachine
+# {
+# ...
+# "digitalocean": {
+# "hosts": [
+# "mymachine"
+# ]
+# ...
+# }
+strict: no
+keyed_groups:
+ - separator: ''
+ key: docker_machine_node_attributes.DriverName
+
+# Example grouping hosts by Docker Machine tag
+strict: no
+keyed_groups:
+ - prefix: tag
+ key: 'dm_tags'
+
+# Example using compose to override the default SSH behaviour of asking the user to accept the remote host key
+compose:
+ ansible_ssh_common_args: '"-o StrictHostKeyChecking=accept-new"'
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible.module_utils._text import to_text
+from ansible.module_utils.common.process import get_bin_path
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable
+from ansible.utils.display import Display
+
+import json
+import re
+import subprocess
+
+display = Display()
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable):
+ ''' Host inventory parser for ansible using Docker machine as source. '''
+
+ NAME = 'community.docker.docker_machine'
+
+ DOCKER_MACHINE_PATH = None
+
+ def _run_command(self, args):
+ if not self.DOCKER_MACHINE_PATH:
+ try:
+ self.DOCKER_MACHINE_PATH = get_bin_path('docker-machine')
+ except ValueError as e:
+ raise AnsibleError(to_native(e))
+
+ command = [self.DOCKER_MACHINE_PATH]
+ command.extend(args)
+ display.debug('Executing command {0}'.format(command))
+ try:
+ result = subprocess.check_output(command)
+ except subprocess.CalledProcessError as e:
+ display.warning('Exception {0} caught while executing command {1}, this was the original exception: {2}'.format(type(e).__name__, command, e))
+ raise e
+
+ return to_text(result).strip()
+
+ def _get_docker_daemon_variables(self, machine_name):
+ '''
+ Capture settings from Docker Machine that would be needed to connect to the remote Docker daemon installed on
+ the Docker Machine remote host. Note: passing '--shell=sh' is a workaround for 'Error: Unknown shell'.
+ '''
+ try:
+ env_lines = self._run_command(['env', '--shell=sh', machine_name]).splitlines()
+ except subprocess.CalledProcessError:
+ # This can happen when the machine is created but provisioning is incomplete
+ return []
+
+ # example output of docker-machine env --shell=sh:
+ # export DOCKER_TLS_VERIFY="1"
+ # export DOCKER_HOST="tcp://134.209.204.160:2376"
+ # export DOCKER_CERT_PATH="/root/.docker/machine/machines/routinator"
+ # export DOCKER_MACHINE_NAME="routinator"
+ # # Run this command to configure your shell:
+ # # eval $(docker-machine env --shell=bash routinator)
+
+ # capture any of the DOCKER_xxx variables that were output and create Ansible host vars
+ # with the same name and value but with a dm_ name prefix.
+ env_vars = []
+ for line in env_lines:
+ match = re.search('(DOCKER_[^=]+)="([^"]+)"', line)
+ if match:
+ env_var_name = match.group(1)
+ env_var_value = match.group(2)
+ env_vars.append((env_var_name, env_var_value))
+
+ return env_vars
+
+ def _get_machine_names(self):
+ # Filter out machines that are not in the Running state, as we probably can't perform any useful
+ # actions with them.
+ ls_command = ['ls', '-q']
+ if self.get_option('running_required'):
+ ls_command.extend(['--filter', 'state=Running'])
+
+ try:
+ ls_lines = self._run_command(ls_command)
+ except subprocess.CalledProcessError:
+ return []
+
+ return ls_lines.splitlines()
+
+ def _inspect_docker_machine_host(self, node):
+ try:
+ inspect_lines = self._run_command(['inspect', node])
+ except subprocess.CalledProcessError:
+ return None
+
+ return json.loads(inspect_lines)
+
+ def _ip_addr_docker_machine_host(self, node):
+ try:
+ ip_addr = self._run_command(['ip', node])
+ except subprocess.CalledProcessError:
+ return None
+
+ return ip_addr
+
+ def _should_skip_host(self, machine_name, env_var_tuples, daemon_env):
+ if not env_var_tuples:
+ warning_prefix = 'Unable to fetch Docker daemon env vars from Docker Machine for host {0}'.format(machine_name)
+ if daemon_env in ('require', 'require-silently'):
+ if daemon_env == 'require':
+ display.warning('{0}: host will be skipped'.format(warning_prefix))
+ return True
+ else: # 'optional', 'optional-silently'
+ if daemon_env == 'optional':
+ display.warning('{0}: host will lack dm_DOCKER_xxx variables'.format(warning_prefix))
+ return False
+
+ def _populate(self):
+ daemon_env = self.get_option('daemon_env')
+ try:
+ for self.node in self._get_machine_names():
+ self.node_attrs = self._inspect_docker_machine_host(self.node)
+ if not self.node_attrs:
+ continue
+
+ machine_name = self.node_attrs['Driver']['MachineName']
+
+ # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands
+ # that could be used to set environment variables to influence a local Docker client:
+ if daemon_env == 'skip':
+ env_var_tuples = []
+ else:
+ env_var_tuples = self._get_docker_daemon_variables(machine_name)
+ if self._should_skip_host(machine_name, env_var_tuples, daemon_env):
+ continue
+
+ # add an entry in the inventory for this host
+ self.inventory.add_host(machine_name)
+
+ # check for valid ip address from inspect output, else explicitly use ip command to find host ip address
+ # this works around an issue seen with Google Cloud Platform where the IP address was not available
+ # via the 'inspect' subcommand but was via the 'ip' subcommand.
+ if self.node_attrs['Driver']['IPAddress']:
+ ip_addr = self.node_attrs['Driver']['IPAddress']
+ else:
+ ip_addr = self._ip_addr_docker_machine_host(self.node)
+
+ # set standard Ansible remote host connection settings to details captured from `docker-machine`
+ # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html
+ self.inventory.set_variable(machine_name, 'ansible_host', ip_addr)
+ self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort'])
+ self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser'])
+ self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath'])
+
+ # set variables based on Docker Machine tags
+ tags = self.node_attrs['Driver'].get('Tags') or ''
+ self.inventory.set_variable(machine_name, 'dm_tags', tags)
+
+ # set variables based on Docker Machine env variables
+ for kv in env_var_tuples:
+ self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1])
+
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs)
+
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict)
+
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict)
+
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict)
+
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' %
+ to_native(e), orig_exc=e)
+
+ def verify_file(self, path):
+ """Return the possibility of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker_machine.yaml', 'docker_machine.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_swarm.py b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_swarm.py
new file mode 100644
index 00000000..65c5f719
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/inventory/docker_swarm.py
@@ -0,0 +1,254 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) 2018, Stefan Heitmueller <stefan.heitmueller@gmx.com>
+# Copyright (c) 2018 Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+ name: docker_swarm
+ author:
+ - Stefan Heitmüller (@morph027) <stefan.heitmueller@gmx.com>
+ short_description: Ansible dynamic inventory plugin for Docker swarm nodes.
+ requirements:
+ - python >= 2.7
+ - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0
+ extends_documentation_fragment:
+ - constructed
+ description:
+ - Reads inventories from the Docker swarm API.
+ - Uses a YAML configuration file docker_swarm.[yml|yaml].
+ - "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes;
+ I(managers) - all manager nodes; I(leader) - the swarm leader node;
+ I(nonleaders) - all nodes except the swarm leader."
+ options:
+ plugin:
+ description: The name of this plugin. It should always be set to C(community.docker.docker_swarm)
+ for this plugin to recognize it as its own.
+ type: str
+ required: true
+ choices: [ docker_swarm, community.docker.docker_swarm ]
+ docker_host:
+ description:
+ - Socket of a Docker swarm manager node (C(tcp), C(unix)).
+ - "Use C(unix://var/run/docker.sock) to connect via local socket."
+ type: str
+ required: true
+ aliases: [ docker_url ]
+ verbose_output:
+ description: Toggle whether to include all available node metadata (e.g. C(Platform), C(Architecture), C(OS),
+ C(EngineVersion)).
+ type: bool
+ default: yes
+ tls:
+ description: Connect using TLS without verifying the authenticity of the Docker host server.
+ type: bool
+ default: no
+ validate_certs:
+ description: Toggle whether to verify the authenticity of the Docker host
+ server when connecting using TLS.
+ type: bool
+ default: no
+ aliases: [ tls_verify ]
+ client_key:
+ description: Path to the client's TLS key file.
+ type: path
+ aliases: [ tls_client_key, key_path ]
+ ca_cert:
+ description: Use a CA certificate when performing server verification by providing the path to a CA
+ certificate file.
+ type: path
+ aliases: [ tls_ca_cert, cacert_path ]
+ client_cert:
+ description: Path to the client's TLS certificate file.
+ type: path
+ aliases: [ tls_client_cert, cert_path ]
+ tls_hostname:
+ description: When verifying the authenticity of the Docker host server, provide the expected name of
+ the server.
+ type: str
+ ssl_version:
+ description: Provide a valid SSL version number. Default value determined by ssl.py module.
+ type: str
+ api_version:
+ description:
+ - The version of the Docker API running on the Docker Host.
+ - Defaults to the latest version of the API supported by docker-py.
+ type: str
+ aliases: [ docker_api_version ]
+ timeout:
+ description:
+ - The maximum amount of time in seconds to wait on a response from the API.
+ - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT)
+ will be used instead. If the environment variable is not set, the default value will be used.
+ type: int
+ default: 60
+ aliases: [ time_out ]
+ include_host_uri:
+ description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the
+ swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional
+ modification as value of option I(docker_host) in Docker Swarm modules when connecting via API.
+ The port always defaults to C(2376).
+ type: bool
+ default: no
+ include_host_uri_port:
+ description: Override the detected port number included in I(ansible_host_uri).
+ type: int
+'''
+
+EXAMPLES = '''
+# Minimal example using local docker
+plugin: community.docker.docker_swarm
+docker_host: unix://var/run/docker.sock
+
+# Minimal example using remote docker
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2375
+
+# Example using remote docker with unverified TLS
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2376
+tls: yes
+
+# Example using remote docker with verified TLS and client certificate verification
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2376
+validate_certs: yes
+ca_cert: /somewhere/ca.pem
+client_key: /somewhere/key.pem
+client_cert: /somewhere/cert.pem
+
+# Example using constructed features to create groups and set ansible_host
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2375
+strict: False
+keyed_groups:
+ # add e.g. x86_64 hosts to an arch_x86_64 group
+ - prefix: arch
+ key: 'Description.Platform.Architecture'
+ # add e.g. linux hosts to an os_linux group
+ - prefix: os
+ key: 'Description.Platform.OS'
+ # create a group per node label
+ # e.g. a node labeled w/ "production" ends up in group "label_production"
+ # hint: labels containing special characters will be converted to safe names
+ - key: 'Spec.Labels'
+ prefix: label
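+
+# Editor's sketch (illustrative values): expose each node's API endpoint as a host
+# variable via the include_host_uri option documented above
+plugin: community.docker.docker_swarm
+docker_host: tcp://my-docker-host:2376
+tls: yes
+include_host_uri: yes
+include_host_uri_port: 2376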
+'''
+
+from ansible.errors import AnsibleError
+from ansible.module_utils._text import to_native
+from ansible_collections.community.docker.plugins.module_utils.common import update_tls_hostname, get_connect_params
+from ansible.plugins.inventory import BaseInventoryPlugin, Constructable
+from ansible.parsing.utils.addresses import parse_address
+
+try:
+ import docker
+ HAS_DOCKER = True
+except ImportError:
+ HAS_DOCKER = False
+
+
+class InventoryModule(BaseInventoryPlugin, Constructable):
+ ''' Host inventory parser for ansible using Docker swarm as source. '''
+
+ NAME = 'community.docker.docker_swarm'
+
+ def _fail(self, msg):
+ raise AnsibleError(msg)
+
+ def _populate(self):
+ raw_params = dict(
+ docker_host=self.get_option('docker_host'),
+ tls=self.get_option('tls'),
+ tls_verify=self.get_option('validate_certs'),
+ key_path=self.get_option('client_key'),
+ cacert_path=self.get_option('ca_cert'),
+ cert_path=self.get_option('client_cert'),
+ tls_hostname=self.get_option('tls_hostname'),
+ api_version=self.get_option('api_version'),
+ timeout=self.get_option('timeout'),
+ ssl_version=self.get_option('ssl_version'),
+ debug=None,
+ )
+ update_tls_hostname(raw_params)
+ connect_params = get_connect_params(raw_params, fail_function=self._fail)
+ self.client = docker.DockerClient(**connect_params)
+ self.inventory.add_group('all')
+ self.inventory.add_group('manager')
+ self.inventory.add_group('worker')
+ self.inventory.add_group('leader')
+ self.inventory.add_group('nonleaders')
+
+ if self.get_option('include_host_uri'):
+ if self.get_option('include_host_uri_port'):
+ host_uri_port = str(self.get_option('include_host_uri_port'))
+ elif self.get_option('tls') or self.get_option('validate_certs'):
+ host_uri_port = '2376'
+ else:
+ host_uri_port = '2375'
+
+ try:
+ self.nodes = self.client.nodes.list()
+ for self.node in self.nodes:
+ self.node_attrs = self.client.nodes.get(self.node.id).attrs
+ self.inventory.add_host(self.node_attrs['ID'])
+ self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role'])
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host',
+ self.node_attrs['Status']['Addr'])
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port)
+ if self.get_option('verbose_output'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs)
+ if 'ManagerStatus' in self.node_attrs:
+ if self.node_attrs['ManagerStatus'].get('Leader'):
+ # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
+ # Check moby/moby#35437 for details
+ swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \
+ self.node_attrs['Status']['Addr']
+ if self.get_option('include_host_uri'):
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri',
+ 'tcp://' + swarm_leader_ip + ':' + host_uri_port)
+ self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip)
+ self.inventory.add_host(self.node_attrs['ID'], group='leader')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ else:
+ self.inventory.add_host(self.node_attrs['ID'], group='nonleaders')
+ # Use constructed if applicable
+ strict = self.get_option('strict')
+ # Composed variables
+ self._set_composite_vars(self.get_option('compose'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group
+ self._add_host_to_composed_groups(self.get_option('groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ # Create groups based on variable values and add the corresponding hosts to it
+ self._add_host_to_keyed_groups(self.get_option('keyed_groups'),
+ self.node_attrs,
+ self.node_attrs['ID'],
+ strict=strict)
+ except Exception as e:
+ raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' %
+ to_native(e))
+
+ def verify_file(self, path):
+ """Return the possibly of a file being consumable by this plugin."""
+ return (
+ super(InventoryModule, self).verify_file(path) and
+ path.endswith(('docker_swarm.yaml', 'docker_swarm.yml')))
+
+ def parse(self, inventory, loader, path, cache=True):
+ if not HAS_DOCKER:
+ raise AnsibleError('The Docker swarm dynamic inventory plugin requires the Docker SDK for Python: '
+ 'https://github.com/docker/docker-py.')
+ super(InventoryModule, self).parse(inventory, loader, path, cache)
+ self._read_config_data(path)
+ self._populate()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/__init__.py b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/__init__.py
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/common.py b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/common.py
new file mode 100644
index 00000000..c828b879
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/common.py
@@ -0,0 +1,1022 @@
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import abc
+import os
+import platform
+import re
+import sys
+from datetime import timedelta
+from distutils.version import LooseVersion
+
+
+from ansible.module_utils.basic import AnsibleModule, env_fallback, missing_required_lib
+from ansible.module_utils.common._collections_compat import Mapping, Sequence
+from ansible.module_utils.six import string_types
+from ansible.module_utils.six.moves.urllib.parse import urlparse
+from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE, BOOLEANS_FALSE
+
+HAS_DOCKER_PY = True
+HAS_DOCKER_PY_2 = False
+HAS_DOCKER_PY_3 = False
+HAS_DOCKER_ERROR = None
+
+try:
+ from requests.exceptions import SSLError
+ from docker import __version__ as docker_version
+ from docker.errors import APIError, NotFound, TLSParameterError
+ from docker.tls import TLSConfig
+ from docker import auth
+
+ if LooseVersion(docker_version) >= LooseVersion('3.0.0'):
+ HAS_DOCKER_PY_3 = True
+ from docker import APIClient as Client
+ elif LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ HAS_DOCKER_PY_2 = True
+ from docker import APIClient as Client
+ else:
+ from docker import Client
+
+except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ HAS_DOCKER_PY = False
+
+
+# The next 2 imports ``docker.models`` and ``docker.ssladapter`` are used
+# to ensure the user does not have both ``docker`` and ``docker-py`` modules
+# installed, as they utilize the same namespace and are incompatible
+try:
+ # docker (Docker SDK for Python >= 2.0.0)
+ import docker.models # noqa: F401
+ HAS_DOCKER_MODELS = True
+except ImportError:
+ HAS_DOCKER_MODELS = False
+
+try:
+ # docker-py (Docker SDK for Python < 2.0.0)
+ import docker.ssladapter # noqa: F401
+ HAS_DOCKER_SSLADAPTER = True
+except ImportError:
+ HAS_DOCKER_SSLADAPTER = False
+
+
+try:
+ from requests.exceptions import RequestException
+except ImportError:
+ # Either docker-py is no longer using requests, or docker-py isn't around either,
+ # or docker-py's dependency requests is missing. In any case, define an exception
+ # class RequestException so that our code doesn't break.
+ class RequestException(Exception):
+ pass
+
+
+DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock'
+DEFAULT_TLS = False
+DEFAULT_TLS_VERIFY = False
+DEFAULT_TLS_HOSTNAME = 'localhost'
+MIN_DOCKER_VERSION = "1.8.0"
+DEFAULT_TIMEOUT_SECONDS = 60
+
+DOCKER_COMMON_ARGS = dict(
+ docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']),
+ tls_hostname=dict(type='str', default=DEFAULT_TLS_HOSTNAME, fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])),
+ api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])),
+ ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']),
+ client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']),
+ client_key=dict(type='path', aliases=['tls_client_key', 'key_path']),
+ ssl_version=dict(type='str', fallback=(env_fallback, ['DOCKER_SSL_VERSION'])),
+ tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])),
+ validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']),
+ debug=dict(type='bool', default=False)
+)
+
+DOCKER_MUTUALLY_EXCLUSIVE = []
+
+DOCKER_REQUIRED_TOGETHER = [
+ ['client_cert', 'client_key']
+]
+
+DEFAULT_DOCKER_REGISTRY = 'https://index.docker.io/v1/'
+EMAIL_REGEX = r'[^@]+@[^@]+\.[^@]+'
+BYTE_SUFFIXES = ['B', 'KB', 'MB', 'GB', 'TB', 'PB']
+
+
+if not HAS_DOCKER_PY:
+ docker_version = None
+
+ # No Docker SDK for Python. Create a place holder client to allow
+ # instantiation of AnsibleModule and proper error handing
+ class Client(object): # noqa: F811
+ def __init__(self, **kwargs):
+ pass
+
+ class APIError(Exception): # noqa: F811
+ pass
+
+ class NotFound(Exception): # noqa: F811
+ pass
+
+
+def is_image_name_id(name):
+ """Check whether the given image name is in fact an image ID (hash)."""
+ if re.match('^sha256:[0-9a-fA-F]{64}$', name):
+ return True
+ return False
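+
+# Editor's illustration (not part of the module): a string of the form 'sha256:'
+# followed by exactly 64 hex digits yields True; a name such as 'ubuntu:20.04' yields False.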
+
+
+def is_valid_tag(tag, allow_empty=False):
+ """Check whether the given string is a valid docker tag name."""
+ if not tag:
+ return allow_empty
+ # See here ("Extended description") for a definition what tags can be:
+ # https://docs.docker.com/engine/reference/commandline/tag/
+ return bool(re.match('^[a-zA-Z0-9_][a-zA-Z0-9_.-]{0,127}$', tag))
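+
+# Editor's illustration: per the regex above, is_valid_tag('v1.2.3') is True, while
+# is_valid_tag('-broken') is False because a tag must not start with '.' or '-'.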
+
+
+def sanitize_result(data):
+ """Sanitize data object for return to Ansible.
+
+ When the data object contains types such as docker.types.containers.HostConfig,
+ Ansible will fail when these are returned via exit_json or fail_json.
+ HostConfig is derived from dict, but its constructor requires additional
+ arguments. This function sanitizes data structures by recursively converting
+ everything derived from dict to dict and everything derived from list (and tuple)
+ to a list.
+ """
+ if isinstance(data, dict):
+ return dict((k, sanitize_result(v)) for k, v in data.items())
+ elif isinstance(data, (list, tuple)):
+ return [sanitize_result(v) for v in data]
+ else:
+ return data
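+
+# Editor's illustration: sanitize_result converts dict subclasses to plain dicts and
+# tuples to lists, e.g. {'ids': ('a', 'b')} becomes {'ids': ['a', 'b']}.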
+
+
+class DockerBaseClass(object):
+ def __init__(self):
+ self.debug = False
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+
+def update_tls_hostname(result):
+ if result['tls_hostname'] is None:
+ # get default machine name from the url
+ parsed_url = urlparse(result['docker_host'])
+ if ':' in parsed_url.netloc:
+ result['tls_hostname'] = parsed_url.netloc[:parsed_url.netloc.rindex(':')]
+ else:
+ result['tls_hostname'] = parsed_url.netloc
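+
+# Editor's illustration: with docker_host 'tcp://192.0.2.10:2376' and no explicit
+# tls_hostname, the hostname defaults to '192.0.2.10' (the netloc without the port).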
+
+
+def _get_tls_config(fail_function, **kwargs):
+ try:
+ tls_config = TLSConfig(**kwargs)
+ return tls_config
+ except TLSParameterError as exc:
+ fail_function("TLS config error: %s" % exc)
+
+
+def get_connect_params(auth, fail_function):
+ if auth['tls'] or auth['tls_verify']:
+ auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')
+
+ if auth['tls_verify'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and host verification
+ if auth['cacert_path']:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ ca_cert=auth['cacert_path'],
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ else:
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify'] and auth['cacert_path']:
+ # TLS with cacert only
+ tls_config = _get_tls_config(ca_cert=auth['cacert_path'],
+ assert_hostname=auth['tls_hostname'],
+ verify=True,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls_verify']:
+ # TLS with verify and no certs
+ tls_config = _get_tls_config(verify=True,
+ assert_hostname=auth['tls_hostname'],
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls'] and auth['cert_path'] and auth['key_path']:
+ # TLS with certs and no host verification
+ tls_config = _get_tls_config(client_cert=(auth['cert_path'], auth['key_path']),
+ verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ if auth['tls']:
+ # TLS with no certs and no host verification
+ tls_config = _get_tls_config(verify=False,
+ ssl_version=auth['ssl_version'],
+ fail_function=fail_function)
+ return dict(base_url=auth['docker_host'],
+ tls=tls_config,
+ version=auth['api_version'],
+ timeout=auth['timeout'])
+
+ # No TLS
+ return dict(base_url=auth['docker_host'],
+ version=auth['api_version'],
+ timeout=auth['timeout'])
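+
+# Editor's illustration (assumed values): for auth = {'docker_host': 'unix://var/run/docker.sock',
+# 'tls': False, 'tls_verify': False, 'api_version': 'auto', 'timeout': 60, ...} the no-TLS
+# branch above returns {'base_url': 'unix://var/run/docker.sock', 'version': 'auto', 'timeout': 60};
+# the TLS branches additionally carry a TLSConfig object under the 'tls' key.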
+
+
+DOCKERPYUPGRADE_SWITCH_TO_DOCKER = "Try `pip uninstall docker-py` followed by `pip install docker`."
+DOCKERPYUPGRADE_UPGRADE_DOCKER = "Use `pip install --upgrade docker` to upgrade."
+DOCKERPYUPGRADE_RECOMMEND_DOCKER = ("Use `pip install --upgrade docker-py` to upgrade. "
+ "Hint: if you do not need Python 2.6 support, try "
+ "`pip uninstall docker-py` instead, followed by `pip install docker`.")
+
+
+class AnsibleDockerClientBase(Client):
+ def __init__(self, min_docker_version=None, min_docker_api_version=None):
+ if min_docker_version is None:
+ min_docker_version = MIN_DOCKER_VERSION
+ NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))
+
+ self.docker_py_version = LooseVersion(docker_version)
+
+ if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
+ self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
+ "SDK for Python) installed together as they use the same namespace and cause a corrupt "
+ "installation. Please uninstall both packages, and re-install only the docker-py or docker "
+ "python module (for %s's Python %s). It is recommended to install the docker module if no "
+ "support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
+ "can leave the other module in a broken state." % (platform.node(), sys.executable))
+
+ if not HAS_DOCKER_PY:
+ if NEEDS_DOCKER_PY2:
+ msg = missing_required_lib("Docker SDK for Python: docker")
+ msg = msg + ", for example via `pip install docker`. The error was: %s"
+ else:
+ msg = missing_required_lib("Docker SDK for Python: docker (Python >= 2.7) or docker-py (Python 2.6)")
+ msg = msg + ", for example via `pip install docker` or `pip install docker-py` (Python 2.6). The error was: %s"
+ self.fail(msg % HAS_DOCKER_ERROR)
+
+ if self.docker_py_version < LooseVersion(min_docker_version):
+ msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
+ if not NEEDS_DOCKER_PY2:
+ # The minimal required version is < 2.0 (and the current version as well).
+ # Advertise docker (instead of docker-py) for non-Python-2.6 users.
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif docker_version < LooseVersion('2.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))
+
+ self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)
+
+ try:
+ super(AnsibleDockerClientBase, self).__init__(**self._connect_params)
+ self.docker_api_version_str = self.version()['ApiVersion']
+ except APIError as exc:
+ self.fail("Docker API error: %s" % exc)
+ except Exception as exc:
+ self.fail("Error connecting: %s" % exc)
+
+ self.docker_api_version = LooseVersion(self.docker_api_version_str)
+ if min_docker_api_version is not None:
+ if self.docker_api_version < LooseVersion(min_docker_api_version):
+ self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
+
+ def log(self, msg, pretty_print=False):
+ pass
+ # if self.debug:
+ # log_file = open('docker.log', 'a')
+ # if pretty_print:
+ # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': ')))
+ # log_file.write(u'\n')
+ # else:
+ # log_file.write(msg + u'\n')
+
+ @abc.abstractmethod
+ def fail(self, msg, **kwargs):
+ pass
+
+ @staticmethod
+ def _get_value(param_name, param_value, env_variable, default_value):
+ if param_value is not None:
+ # take module parameter value
+ if param_value in BOOLEANS_TRUE:
+ return True
+ if param_value in BOOLEANS_FALSE:
+ return False
+ return param_value
+
+ if env_variable is not None:
+ env_value = os.environ.get(env_variable)
+ if env_value is not None:
+ # take the env variable value
+ if param_name == 'cert_path':
+ return os.path.join(env_value, 'cert.pem')
+ if param_name == 'cacert_path':
+ return os.path.join(env_value, 'ca.pem')
+ if param_name == 'key_path':
+ return os.path.join(env_value, 'key.pem')
+ if env_value in BOOLEANS_TRUE:
+ return True
+ if env_value in BOOLEANS_FALSE:
+ return False
+ return env_value
+
+ # take the default
+ return default_value
+
+ @abc.abstractmethod
+ def _get_params(self):
+ pass
+
+ @property
+ def auth_params(self):
+ # Get authentication credentials.
+ # Precedence: module parameters-> environment variables-> defaults.
+
+ self.log('Getting credentials')
+
+ client_params = self._get_params()
+
+ params = dict()
+ for key in DOCKER_COMMON_ARGS:
+ params[key] = client_params.get(key)
+
+ result = dict(
+ docker_host=self._get_value('docker_host', params['docker_host'], 'DOCKER_HOST',
+ DEFAULT_DOCKER_HOST),
+ tls_hostname=self._get_value('tls_hostname', params['tls_hostname'],
+ 'DOCKER_TLS_HOSTNAME', DEFAULT_TLS_HOSTNAME),
+ api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION',
+ 'auto'),
+ cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None),
+ cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None),
+ key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None),
+ ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None),
+ tls=self._get_value('tls', params['tls'], 'DOCKER_TLS', DEFAULT_TLS),
+ tls_verify=self._get_value('tls_verify', params['validate_certs'], 'DOCKER_TLS_VERIFY',
+ DEFAULT_TLS_VERIFY),
+ timeout=self._get_value('timeout', params['timeout'], 'DOCKER_TIMEOUT',
+ DEFAULT_TIMEOUT_SECONDS),
+ )
+
+ update_tls_hostname(result)
+
+ return result
+
+ def _handle_ssl_error(self, error):
+ match = re.match(r"hostname.*doesn\'t match (\'.*\')", str(error))
+ if match:
+ self.fail("You asked for verification that Docker daemons certificate's hostname matches %s. "
+ "The actual certificate's hostname is %s. Most likely you need to set DOCKER_TLS_HOSTNAME "
+ "or pass `tls_hostname` with a value of %s. You may also use TLS without verification by "
+ "setting the `tls` parameter to true."
+ % (self.auth_params['tls_hostname'], match.group(1), match.group(1)))
+ self.fail("SSL Exception: %s" % (error))
+
+ def get_container_by_id(self, container_id):
+ try:
+ self.log("Inspecting container Id %s" % container_id)
+ result = self.inspect_container(container=container_id)
+ self.log("Completed container inspection")
+ return result
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting container: %s" % exc)
+
+ def get_container(self, name=None):
+ '''
+ Lookup a container and return the inspection results.
+ '''
+ if name is None:
+ return None
+
+ search_name = name
+ if not name.startswith('/'):
+ search_name = '/' + name
+
+ result = None
+ try:
+ for container in self.containers(all=True):
+ self.log("testing container: %s" % (container['Names']))
+ if isinstance(container['Names'], list) and search_name in container['Names']:
+ result = container
+ break
+ if container['Id'].startswith(name):
+ result = container
+ break
+ if container['Id'] == name:
+ result = container
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving container list: %s" % exc)
+
+ if result is None:
+ return None
+
+ return self.get_container_by_id(result['Id'])
+
+ def get_network(self, name=None, network_id=None):
+ '''
+ Lookup a network and return the inspection results.
+ '''
+ if name is None and network_id is None:
+ return None
+
+ result = None
+
+ if network_id is None:
+ try:
+ for network in self.networks():
+ self.log("testing network: %s" % (network['Name']))
+ if name == network['Name']:
+ result = network
+ break
+ if network['Id'].startswith(name):
+ result = network
+ break
+ except SSLError as exc:
+ self._handle_ssl_error(exc)
+ except Exception as exc:
+ self.fail("Error retrieving network list: %s" % exc)
+
+ if result is not None:
+ network_id = result['Id']
+
+ if network_id is not None:
+ try:
+ self.log("Inspecting network Id %s" % network_id)
+ result = self.inspect_network(network_id)
+ self.log("Completed network inspection")
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ self.fail("Error inspecting network: %s" % exc)
+
+ return result
+
+ def find_image(self, name, tag):
+ '''
+ Lookup an image (by name and tag) and return the inspection results.
+ '''
+ if not name:
+ return None
+
+ self.log("Find image %s:%s" % (name, tag))
+ images = self._image_lookup(name, tag)
+ if not images:
+ # In API <= 1.20, images pulled from Docker Hub are seen with 'docker.io/<name>' as their name
+ registry, repo_name = auth.resolve_repository_name(name)
+ if registry == 'docker.io':
+ # If docker.io is explicitly there in name, the image
+ # isn't found in some cases (#41509)
+ self.log("Check for docker.io image: %s" % repo_name)
+ images = self._image_lookup(repo_name, tag)
+ if not images and repo_name.startswith('library/'):
+ # Sometimes library/xxx images are not found
+ lookup = repo_name[len('library/'):]
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+ if not images:
+ # Last case: if docker.io wasn't there, it can be that
+ # the image wasn't found either (#15586)
+ lookup = "%s/%s" % (registry, repo_name)
+ self.log("Check for docker.io image: %s" % lookup)
+ images = self._image_lookup(lookup, tag)
+
+ if len(images) > 1:
+ self.fail("Registry returned more than one result for %s:%s" % (name, tag))
+
+ if len(images) == 1:
+ try:
+ inspection = self.inspect_image(images[0]['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s:%s - %s" % (name, tag, str(exc)))
+ return inspection
+
+ self.log("Image %s:%s not found." % (name, tag))
+ return None
+
+ def find_image_by_id(self, image_id):
+ '''
+ Lookup an image (by ID) and return the inspection results.
+ '''
+ if not image_id:
+ return None
+
+ self.log("Find image %s (by ID)" % image_id)
+ try:
+ inspection = self.inspect_image(image_id)
+ except Exception as exc:
+ self.fail("Error inspecting image ID %s - %s" % (image_id, str(exc)))
+ return inspection
+
+ def _image_lookup(self, name, tag):
+ '''
+ Including a tag in the name parameter sent to the Docker SDK for Python images method
+ does not work consistently. Instead, get the result set for name and manually check
+ if the tag exists.
+ '''
+ try:
+ response = self.images(name=name)
+ except Exception as exc:
+ self.fail("Error searching for image %s - %s" % (name, str(exc)))
+ images = response
+ if tag:
+ lookup = "%s:%s" % (name, tag)
+ lookup_digest = "%s@%s" % (name, tag)
+ images = []
+ for image in response:
+ tags = image.get('RepoTags')
+ digests = image.get('RepoDigests')
+ if (tags and lookup in tags) or (digests and lookup_digest in digests):
+ images = [image]
+ break
+ return images
+
+ def pull_image(self, name, tag="latest"):
+ '''
+ Pull an image
+ '''
+ self.log("Pulling image %s:%s" % (name, tag))
+ old_tag = self.find_image(name, tag)
+ try:
+ for line in self.pull(name, tag=tag, stream=True, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('error'):
+ if line.get('errorDetail'):
+ error_detail = line.get('errorDetail')
+ self.fail("Error pulling %s - code: %s message: %s" % (name,
+ error_detail.get('code'),
+ error_detail.get('message')))
+ else:
+ self.fail("Error pulling %s - %s" % (name, line.get('error')))
+ except Exception as exc:
+ self.fail("Error pulling image %s:%s - %s" % (name, tag, str(exc)))
+
+ new_tag = self.find_image(name, tag)
+
+ return new_tag, old_tag == new_tag
+
+ def inspect_distribution(self, image, **kwargs):
+ '''
+ Get image digest by directly calling the Docker API when running Docker SDK < 4.0.0
+ since prior versions did not support accessing private repositories.
+ '''
+ if self.docker_py_version < LooseVersion('4.0.0'):
+ registry = auth.resolve_repository_name(image)[0]
+ header = auth.get_config_header(self, registry)
+ if header:
+ return self._result(self._get(
+ self._url('/distribution/{0}/json', image),
+ headers={'X-Registry-Auth': header}
+ ), json=True)
+ return super(AnsibleDockerClientBase, self).inspect_distribution(image, **kwargs)
+
+
+class AnsibleDockerClient(AnsibleDockerClientBase):
+
+ def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None,
+ required_together=None, required_if=None, min_docker_version=None,
+ min_docker_api_version=None, option_minimal_versions=None,
+ option_minimal_versions_ignore_params=None, fail_results=None):
+
+ # Modules can put information in here which will always be returned
+ # in case client.fail() is called.
+ self.fail_results = fail_results or {}
+
+ merged_arg_spec = dict()
+ merged_arg_spec.update(DOCKER_COMMON_ARGS)
+ if argument_spec:
+ merged_arg_spec.update(argument_spec)
+ self.arg_spec = merged_arg_spec
+
+ mutually_exclusive_params = []
+ mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE
+ if mutually_exclusive:
+ mutually_exclusive_params += mutually_exclusive
+
+ required_together_params = []
+ required_together_params += DOCKER_REQUIRED_TOGETHER
+ if required_together:
+ required_together_params += required_together
+
+ self.module = AnsibleModule(
+ argument_spec=merged_arg_spec,
+ supports_check_mode=supports_check_mode,
+ mutually_exclusive=mutually_exclusive_params,
+ required_together=required_together_params,
+ required_if=required_if)
+
+ self.debug = self.module.params.get('debug')
+ self.check_mode = self.module.check_mode
+
+ super(AnsibleDockerClient, self).__init__(
+ min_docker_version=min_docker_version,
+ min_docker_api_version=min_docker_api_version)
+
+ if option_minimal_versions is not None:
+ self._get_minimal_versions(option_minimal_versions, option_minimal_versions_ignore_params)
+
+ def fail(self, msg, **kwargs):
+ self.fail_results.update(kwargs)
+ self.module.fail_json(msg=msg, **sanitize_result(self.fail_results))
+
+ def _get_params(self):
+ return self.module.params
+
+ def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
+ self.option_minimal_versions = dict()
+ for option in self.module.argument_spec:
+ if ignore_params is not None:
+ if option in ignore_params:
+ continue
+ self.option_minimal_versions[option] = dict()
+ self.option_minimal_versions.update(option_minimal_versions)
+
+ for option, data in self.option_minimal_versions.items():
+ # Test whether option is supported, and store result
+ support_docker_py = True
+ support_docker_api = True
+ if 'docker_py_version' in data:
+ support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
+ if 'docker_api_version' in data:
+ support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
+ data['supported'] = support_docker_py and support_docker_api
+ # Fail if option is not supported but used
+ if not data['supported']:
+ # Test whether option is specified
+ if 'detect_usage' in data:
+ used = data['detect_usage'](self)
+ else:
+ used = self.module.params.get(option) is not None
+ if used and 'default' in self.module.argument_spec[option]:
+ used = self.module.params[option] != self.module.argument_spec[option]['default']
+ if used:
+ # If the option is used, compose error message.
+ if 'usage_msg' in data:
+ usg = data['usage_msg']
+ else:
+ usg = 'set %s option' % (option, )
+ if not support_docker_api:
+ msg = 'Docker API version is %s. Minimum version required is %s to %s.'
+ msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
+ elif not support_docker_py:
+ msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
+ if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
+ elif self.docker_py_version < LooseVersion('2.0.0'):
+ msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
+ else:
+ msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
+ msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
+ else:
+ # should not happen
+ msg = 'Cannot %s with your configuration.' % (usg, )
+ self.fail(msg)
+
+ def report_warnings(self, result, warnings_key=None):
+ '''
+ Checks result of client operation for warnings, and if present, outputs them.
+
+ warnings_key should be a list of keys used to crawl the result dictionary.
+ For example, if warnings_key == ['a', 'b'], the function will consider
+ result['a']['b'] if these keys exist. If the result is a non-empty string, it
+ will be reported as a warning. If the result is a list, every entry will be
+ reported as a warning.
+
+ In most cases (if warnings are returned at all), warnings_key should be
+ ['Warnings'] or ['Warning']. The default value (if not specified) is ['Warnings'].
+ '''
+ if warnings_key is None:
+ warnings_key = ['Warnings']
+ for key in warnings_key:
+ if not isinstance(result, Mapping):
+ return
+ result = result.get(key)
+ if isinstance(result, Sequence):
+ for warning in result:
+ self.module.warn('Docker warning: {0}'.format(warning))
+ elif isinstance(result, string_types) and result:
+ self.module.warn('Docker warning: {0}'.format(result))
+
+
+def compare_dict_allow_more_present(av, bv):
+ '''
+ Compare two dictionaries for whether every entry of the first is in the second.
+ '''
+ for key, value in av.items():
+ if key not in bv:
+ return False
+ if bv[key] != value:
+ return False
+ return True
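+
+# Editor's illustration: compare_dict_allow_more_present({'a': 1}, {'a': 1, 'b': 2})
+# is True, whereas compare_dict_allow_more_present({'a': 1, 'b': 2}, {'a': 1}) is False.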
+
+
+def compare_generic(a, b, method, datatype):
+ '''
+ Compare values a and b as described by method and datatype.
+
+ Returns ``True`` if the values compare equal, and ``False`` if not.
+
+ ``a`` is usually the module's parameter, while ``b`` is a property
+ of the current object. ``a`` must not be ``None`` (except for
+ ``datatype == 'value'``).
+
+ Valid values for ``method`` are:
+ - ``ignore`` (always compare as equal);
+ - ``strict`` (only compare if really equal)
+ - ``allow_more_present`` (allow b to have elements which a does not have).
+
+ Valid values for ``datatype`` are:
+ - ``value``: for simple values (strings, numbers, ...);
+ - ``list``: for ``list``s or ``tuple``s where order matters;
+ - ``set``: for ``list``s, ``tuple``s or ``set``s where order does not
+ matter;
+ - ``set(dict)``: for ``list``s, ``tuple``s or ``sets`` where order does
+ not matter and which contain ``dict``s; ``allow_more_present`` is used
+ for the ``dict``s, and these are assumed to be dictionaries of values;
+ - ``dict``: for dictionaries of values.
+ '''
+ if method == 'ignore':
+ return True
+ # If a or b is None:
+ if a is None or b is None:
+ # If both are None: equality
+ if a == b:
+ return True
+ # Otherwise, not equal for values, and equal
+ # if the other is empty for set/list/dict
+ if datatype == 'value':
+ return False
+ # For allow_more_present, allow a to be None
+ if method == 'allow_more_present' and a is None:
+ return True
+ # Otherwise, the iterable object which is not None must have length 0
+ return len(b if a is None else a) == 0
+ # Do proper comparison (both objects not None)
+ if datatype == 'value':
+ return a == b
+ elif datatype == 'list':
+ if method == 'strict':
+ return a == b
+ else:
+ i = 0
+ for v in a:
+ while i < len(b) and b[i] != v:
+ i += 1
+ if i == len(b):
+ return False
+ i += 1
+ return True
+ elif datatype == 'dict':
+ if method == 'strict':
+ return a == b
+ else:
+ return compare_dict_allow_more_present(a, b)
+ elif datatype == 'set':
+ set_a = set(a)
+ set_b = set(b)
+ if method == 'strict':
+ return set_a == set_b
+ else:
+ return set_b >= set_a
+ elif datatype == 'set(dict)':
+ for av in a:
+ found = False
+ for bv in b:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ if method == 'strict':
+ # If we would know that both a and b do not contain duplicates,
+ # we could simply compare len(a) to len(b) to finish this test.
+ # We can assume that b has no duplicates (as it is returned by
+ # docker), but we don't know for a.
+ for bv in b:
+ found = False
+ for av in a:
+ if compare_dict_allow_more_present(av, bv):
+ found = True
+ break
+ if not found:
+ return False
+ return True
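+
+# Editor's illustration of the semantics above:
+#   compare_generic([1, 2], [1, 3, 2], 'allow_more_present', 'list') -> True (subsequence)
+#   compare_generic([1, 2], [2, 1], 'strict', 'list') -> False (order matters)
+#   compare_generic([1, 2], [2, 1], 'strict', 'set') -> True (order ignored)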
+
+
+class DifferenceTracker(object):
+ def __init__(self):
+ self._diff = []
+
+ def add(self, name, parameter=None, active=None):
+ self._diff.append(dict(
+ name=name,
+ parameter=parameter,
+ active=active,
+ ))
+
+ def merge(self, other_tracker):
+ self._diff.extend(other_tracker._diff)
+
+ @property
+ def empty(self):
+ return len(self._diff) == 0
+
+ def get_before_after(self):
+ '''
+ Return texts ``before`` and ``after``.
+ '''
+ before = dict()
+ after = dict()
+ for item in self._diff:
+ before[item['name']] = item['active']
+ after[item['name']] = item['parameter']
+ return before, after
+
+ def has_difference_for(self, name):
+ '''
+ Return whether a difference exists for the given name.
+ '''
+ return any(diff for diff in self._diff if diff['name'] == name)
+
+ def get_legacy_docker_container_diffs(self):
+ '''
+ Return differences in the docker_container legacy format.
+ '''
+ result = []
+ for entry in self._diff:
+ item = dict()
+ item[entry['name']] = dict(
+ parameter=entry['parameter'],
+ container=entry['active'],
+ )
+ result.append(item)
+ return result
+
+ def get_legacy_docker_diffs(self):
+ '''
+ Return the names of the differing options in the docker_container legacy format.
+ '''
+ result = [entry['name'] for entry in self._diff]
+ return result
+
+
+def clean_dict_booleans_for_docker_api(data):
+ '''
+ Go doesn't like Python booleans 'True' or 'False', while Ansible is just
+ fine with them in YAML. As such, they need to be converted in cases where
+ we pass dictionaries to the Docker API (e.g. docker_network's
+ driver_options and docker_prune's filters).
+ '''
+ result = dict()
+ if data is not None:
+ for k, v in data.items():
+ if v is True:
+ v = 'true'
+ elif v is False:
+ v = 'false'
+ else:
+ v = str(v)
+ result[str(k)] = v
+ return result
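+
+# Editor's illustration: {'com.example.enabled': True, 'size': 10} becomes
+# {'com.example.enabled': 'true', 'size': '10'} (keys and values coerced to str).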
+
+
+def convert_duration_to_nanosecond(time_str):
+ """
+ Return time duration in nanosecond.
+ """
+ if not isinstance(time_str, str):
+ raise ValueError('Missing unit in duration - %s' % time_str)
+
+ regex = re.compile(
+ r'^(((?P<hours>\d+)h)?'
+ r'((?P<minutes>\d+)m(?!s))?'
+ r'((?P<seconds>\d+)s)?'
+ r'((?P<milliseconds>\d+)ms)?'
+ r'((?P<microseconds>\d+)us)?)$'
+ )
+ parts = regex.match(time_str)
+
+ if not parts:
+ raise ValueError('Invalid time duration - %s' % time_str)
+
+ parts = parts.groupdict()
+ time_params = {}
+ for (name, value) in parts.items():
+ if value:
+ time_params[name] = int(value)
+
+ delta = timedelta(**time_params)
+ time_in_nanoseconds = (
+ delta.microseconds + (delta.seconds + delta.days * 24 * 3600) * 10 ** 6
+ ) * 10 ** 3
+
+ return time_in_nanoseconds
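+
+# Editor's illustration: convert_duration_to_nanosecond('1m30s') returns 90000000000
+# and convert_duration_to_nanosecond('500ms') returns 500000000.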
+
+
+def parse_healthcheck(healthcheck):
+ """
+ Return dictionary of healthcheck parameters and boolean if
+ healthcheck defined in image was requested to be disabled.
+ """
+ if (not healthcheck) or (not healthcheck.get('test')):
+ return None, None
+
+ result = dict()
+
+ # All supported healthcheck parameters
+ options = dict(
+ test='test',
+ interval='interval',
+ timeout='timeout',
+ start_period='start_period',
+ retries='retries'
+ )
+
+ duration_options = ['interval', 'timeout', 'start_period']
+
+ for (key, value) in options.items():
+ if value in healthcheck:
+ if healthcheck.get(value) is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value in duration_options:
+ time = convert_duration_to_nanosecond(healthcheck.get(value))
+ if time:
+ result[key] = time
+ elif healthcheck.get(value):
+ result[key] = healthcheck.get(value)
+ if key == 'test':
+ if isinstance(result[key], (tuple, list)):
+ result[key] = [str(e) for e in result[key]]
+ else:
+ result[key] = ['CMD-SHELL', str(result[key])]
+ elif key == 'retries':
+ try:
+ result[key] = int(result[key])
+ except ValueError:
+ raise ValueError(
+ 'Cannot parse number of retries for healthcheck. '
+ 'Expected an integer, got "{0}".'.format(result[key])
+ )
+
+ if result['test'] == ['NONE']:
+ # If the user explicitly disables the healthcheck, return None
+ # as the healthcheck object, and set disable_healthcheck to True
+ return None, True
+
+ return result, False
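+
+# Editor's illustration: parse_healthcheck({'test': ['CMD', 'true'], 'interval': '30s'})
+# returns ({'test': ['CMD', 'true'], 'interval': 30000000000}, False), while
+# parse_healthcheck({'test': ['NONE']}) returns (None, True) to disable the image's healthcheck.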
+
+
+def omit_none_from_dict(d):
+ """
+ Return a copy of the dictionary with all keys with value None omitted.
+ """
+ return dict((k, v) for (k, v) in d.items() if v is not None)
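+
+# Editor's illustration: omit_none_from_dict({'a': 1, 'b': None}) returns {'a': 1}.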
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/swarm.py b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/swarm.py
new file mode 100644
index 00000000..04e34cc7
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/module_utils/swarm.py
@@ -0,0 +1,280 @@
+# (c) 2019 Piotr Wojciechowski (@wojciechowskipiotr) <piotr@it-playground.pl>
+# (c) Thierry Bouvet (@tbouvet)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import json
+from time import sleep
+
+try:
+ from docker.errors import APIError, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible.module_utils._text import to_native
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ LooseVersion,
+)
+
+
+class AnsibleDockerSwarmClient(AnsibleDockerClient):
+
+ def __init__(self, **kwargs):
+ super(AnsibleDockerSwarmClient, self).__init__(**kwargs)
+
+ def get_swarm_node_id(self):
+ """
+ Get the 'NodeID' of the Swarm node, or 'None' if the host is not in a Swarm. It returns the NodeID
+ of the Docker host the module is executed on
+ :return:
+ NodeID of host or 'None' if not part of Swarm
+ """
+
+ try:
+ info = self.info()
+ except APIError as exc:
+ self.fail("Failed to get node information for %s" % to_native(exc))
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return swarm_info['Swarm']['NodeID']
+ return None
+
+ def check_if_swarm_node(self, node_id=None):
+ """
+ Checks if the host is part of a Docker Swarm. If 'node_id' is not provided, it reads the Docker host
+ system information and looks for a specific key in the output. If 'node_id' is provided, it tries to
+ read the node information, assuming it is run on a Swarm manager. The get_node_inspect() method handles
+ the exception if it is not executed on a Swarm manager
+
+ :param node_id: Node identifier
+ :return:
+ bool: True if node is part of Swarm, False otherwise
+ """
+
+ if node_id is None:
+ try:
+ info = self.info()
+ except APIError:
+ self.fail("Failed to get host information.")
+
+ if info:
+ json_str = json.dumps(info, ensure_ascii=False)
+ swarm_info = json.loads(json_str)
+ if swarm_info['Swarm']['NodeID']:
+ return True
+ if swarm_info['Swarm']['LocalNodeState'] in ('active', 'pending', 'locked'):
+ return True
+ return False
+ else:
+ try:
+ node_info = self.get_node_inspect(node_id=node_id)
+ except APIError:
+ return False
+
+ if node_info['ID'] is not None:
+ return True
+ return False
+
+ def check_if_swarm_manager(self):
+ """
+ Checks if the node role is set as Manager in the Swarm. The node is the docker host on which the module
+ action is performed. The inspect_swarm() call will fail if the node is not a manager
+
+ :return: True if node is Swarm Manager, False otherwise
+ """
+
+ try:
+ self.inspect_swarm()
+ return True
+ except APIError:
+ return False
+
+ def fail_task_if_not_swarm_manager(self):
+ """
+ If host is not a swarm manager then Ansible task on this host should end with 'failed' state
+ """
+ if not self.check_if_swarm_manager():
+ self.fail("Error running docker swarm module: must run on swarm manager node")
+
+ def check_if_swarm_worker(self):
+ """
+ Checks if the node role is set as Worker in the Swarm. The node is the docker host on which the module
+ action is performed. Will fail via check_if_swarm_node() if run on a host that is not part of a Swarm
+
+ :return: True if node is Swarm Worker, False otherwise
+ """
+
+ if self.check_if_swarm_node() and not self.check_if_swarm_manager():
+ return True
+ return False
+
+ def check_if_swarm_node_is_down(self, node_id=None, repeat_check=1):
+ """
+ Checks if the node status on the Swarm manager is 'down'. If node_id is provided, it queries the manager
+ about the node specified in the parameter; otherwise it queries the manager itself. If run on a Swarm
+ Worker node or a host that is not part of a Swarm, it will fail the playbook
+
+ :param repeat_check: number of check attempts with 5 seconds delay between them, by default check only once
+ :param node_id: node ID or name, if None then method will try to get node_id of host module run on
+ :return:
+ True if node is part of swarm but its state is down, False otherwise
+ """
+
+ if repeat_check < 1:
+ repeat_check = 1
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ for retry in range(0, repeat_check):
+ if retry > 0:
+ sleep(5)
+ node_info = self.get_node_inspect(node_id=node_id)
+ if node_info['Status']['State'] == 'down':
+ return True
+ return False
+
+ def get_node_inspect(self, node_id=None, skip_missing=False):
+ """
+ Returns Swarm node info about a single node, as shown by the 'docker node inspect' command
+
+ :param skip_missing: if True then function will return None instead of failing the task
+ :param node_id: node ID or name, if None then method will try to get node_id of host module run on
+ :return:
+ Single node information structure
+ """
+
+ if node_id is None:
+ node_id = self.get_swarm_node_id()
+
+ if node_id is None:
+ self.fail("Failed to get node information.")
+
+ try:
+ node_info = self.inspect_node(node_id=node_id)
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ if exc.status_code == 404:
+ if skip_missing:
+ return None
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+
+ if 'ManagerStatus' in node_info:
+ if node_info['ManagerStatus'].get('Leader'):
+ # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0
+ # Check moby/moby#35437 for details
+ count_colons = node_info['ManagerStatus']['Addr'].count(":")
+ if count_colons == 1:
+ swarm_leader_ip = node_info['ManagerStatus']['Addr'].split(":", 1)[0] or node_info['Status']['Addr']
+ else:
+ swarm_leader_ip = node_info['Status']['Addr']
+ node_info['Status']['Addr'] = swarm_leader_ip
+ return node_info
+
+ def get_all_nodes_inspect(self):
+ """
+ Returns Swarm node info about all registered nodes, as shown by the 'docker node inspect' command
+
+ :return:
+ Structure with information about all nodes
+ """
+ try:
+ node_info = self.nodes()
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect node: To inspect node execute module on Swarm Manager")
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ except Exception as exc:
+ self.fail("Error inspecting swarm node: %s" % exc)
+
+ json_str = json.dumps(node_info, ensure_ascii=False)
+ node_info = json.loads(json_str)
+ return node_info
+
+ def get_all_nodes_list(self, output='short'):
+ """
+ Returns list of nodes registered in Swarm
+
+        :param output: Defines the format of the returned data
+        :return:
+            If 'output' is 'short', the returned data is a list of the hostnames of the nodes registered in the Swarm;
+            if 'output' is 'long', the returned data is a list of dicts containing the attributes shown in the
+            output of the command 'docker node ls'
+ """
+ nodes_list = []
+
+ nodes_inspect = self.get_all_nodes_inspect()
+ if nodes_inspect is None:
+ return None
+
+ if output == 'short':
+ for node in nodes_inspect:
+ nodes_list.append(node['Description']['Hostname'])
+ elif output == 'long':
+ for node in nodes_inspect:
+ node_property = {}
+
+ node_property.update({'ID': node['ID']})
+ node_property.update({'Hostname': node['Description']['Hostname']})
+ node_property.update({'Status': node['Status']['State']})
+ node_property.update({'Availability': node['Spec']['Availability']})
+ if 'ManagerStatus' in node:
+ if node['ManagerStatus']['Leader'] is True:
+ node_property.update({'Leader': True})
+ node_property.update({'ManagerStatus': node['ManagerStatus']['Reachability']})
+ node_property.update({'EngineVersion': node['Description']['Engine']['EngineVersion']})
+
+ nodes_list.append(node_property)
+ else:
+ return None
+
+ return nodes_list
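+
+    # Illustrative shapes of the two output formats (values are made up):
+    #   get_all_nodes_list()               -> ['manager-1', 'worker-1']
+    #   get_all_nodes_list(output='long')  -> [{'ID': '...', 'Hostname': 'manager-1',
+    #                                           'Status': 'ready', 'Availability': 'active',
+    #                                           'Leader': True, 'ManagerStatus': 'reachable',
+    #                                           'EngineVersion': '19.03.12'}, ...]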
+
+ def get_node_name_by_id(self, nodeid):
+ return self.get_node_inspect(nodeid)['Description']['Hostname']
+
+ def get_unlock_key(self):
+ if self.docker_py_version < LooseVersion('2.7.0'):
+ return None
+ return super(AnsibleDockerSwarmClient, self).get_unlock_key()
+
+ def get_service_inspect(self, service_id, skip_missing=False):
+ """
+        Returns Swarm service info about a single service, as in the 'docker service inspect' command
+
+        :param service_id: service ID or name
+        :param skip_missing: if True, the function will return None instead of failing the task
+ :return:
+ Single service information structure
+ """
+ try:
+ service_info = self.inspect_service(service_id)
+ except NotFound as exc:
+ if skip_missing is False:
+ self.fail("Error while reading from Swarm manager: %s" % to_native(exc))
+ else:
+ return None
+ except APIError as exc:
+ if exc.status_code == 503:
+ self.fail("Cannot inspect service: To inspect service execute module on Swarm Manager")
+ self.fail("Error inspecting swarm service: %s" % exc)
+ except Exception as exc:
+ self.fail("Error inspecting swarm service: %s" % exc)
+
+ json_str = json.dumps(service_info, ensure_ascii=False)
+ service_info = json.loads(json_str)
+ return service_info
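+
+
+# Hypothetical end-to-end sketch of how a module might use this client
+# (argument values are placeholders, not part of the public API):
+#
+#   client = AnsibleDockerSwarmClient(argument_spec=dict(...))
+#   if not client.check_if_swarm_manager():
+#       client.fail("Error running docker swarm module: must run on swarm manager node")
+#   for hostname in client.get_all_nodes_list():
+#       node_info = client.get_node_inspect(node_id=hostname)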
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/current_container_facts.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/current_container_facts.py
new file mode 100644
index 00000000..926c6ee4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/current_container_facts.py
@@ -0,0 +1,100 @@
+#!/usr/bin/python
+#
+# (c) 2020 Matt Clay <mclay@redhat.com>
+# (c) 2020 Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: current_container_facts
+short_description: Return facts about whether the module runs in a Docker container
+version_added: 1.1.0
+description:
+ - Return facts about whether the module runs in a Docker container.
+author:
+ - Felix Fontein (@felixfontein)
+'''
+
+EXAMPLES = '''
+- name: Get facts on current container
+ community.docker.current_container_facts:
+
+- name: Print information on current container when running in a container
+ ansible.builtin.debug:
+ msg: "Container ID is {{ ansible_module_container_id }}"
+ when: ansible_module_running_in_container
+'''
+
+RETURN = '''
+ansible_facts:
+ description: Ansible facts returned by the module
+ type: dict
+ returned: always
+ contains:
+ ansible_module_running_in_container:
+ description:
+ - Whether the module was able to detect that it runs in a container or not.
+ returned: always
+ type: bool
+ ansible_module_container_id:
+ description:
+ - The detected container ID.
+ - Contains an empty string if no container was detected.
+ returned: always
+ type: str
+ ansible_module_container_type:
+ description:
+ - The detected container environment.
+ - Contains an empty string if no container was detected.
+ - Otherwise, will be one of C(docker) or C(azure_pipelines).
+ returned: always
+ type: str
+ # choices:
+ # - docker
+ # - azure_pipelines
+'''
+
+import os
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+def main():
+ module = AnsibleModule(dict(), supports_check_mode=True)
+
+ path = '/proc/self/cpuset'
+ container_id = ''
+ container_type = ''
+
+ if os.path.exists(path):
+ # File content varies based on the environment:
+ # No Container: /
+ # Docker: /docker/c86f3732b5ba3d28bb83b6e14af767ab96abbc52de31313dcb1176a62d91a507
+ # Azure Pipelines (Docker): /azpl_job/0f2edfed602dd6ec9f2e42c867f4d5ee640ebf4c058e6d3196d4393bb8fd0891
+ # Podman: /../../../../../..
+ with open(path, 'rb') as f:
+ contents = f.read().decode('utf-8')
+
+ cgroup_path, cgroup_name = os.path.split(contents.strip())
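+        # os.path.split() on the stripped contents yields, for example:
+        #   '/docker/c86f3732b5ba...' -> ('/docker', 'c86f3732b5ba...')
+        #   '/'                       -> ('/', '')   (not running in a container)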
+
+ if cgroup_path == '/docker':
+ container_id = cgroup_name
+ container_type = 'docker'
+
+ if cgroup_path == '/azpl_job':
+ container_id = cgroup_name
+ container_type = 'azure_pipelines'
+
+ module.exit_json(ansible_facts=dict(
+ ansible_module_running_in_container=container_id != '',
+ ansible_module_container_id=container_id,
+ ansible_module_container_type=container_type,
+ ))
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_compose.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_compose.py
new file mode 100644
index 00000000..e8b8532c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_compose.py
@@ -0,0 +1,1148 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+
+module: docker_compose
+
+short_description: Manage multi-container Docker applications with Docker Compose.
+
+
+author: "Chris Houseknecht (@chouseknecht)"
+
+description:
+  - Uses Docker Compose to start, shut down, and scale services.
+ - Works with compose versions 1 and 2.
+ - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option.
+ - See the examples for more details.
+ - Supports check mode.
+ - This module was called C(docker_service) before Ansible 2.8. The usage did not change.
+
+options:
+ project_src:
+ description:
+ - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file.
+ - Mutually exclusive with I(definition).
+ - Required when no I(definition) is provided.
+ type: path
+ project_name:
+ description:
+ - Provide a project name. If not provided, the project name is taken from the basename of I(project_src).
+ - Required when I(definition) is provided.
+ type: str
+ files:
+ description:
+ - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml).
+ - Files are loaded and merged in the order given.
+ type: list
+ elements: path
+ state:
+ description:
+ - Desired state of the project.
+      - Specifying C(present) is the same as running C(docker-compose up) (or C(docker-compose stop) with I(stopped),
+        or C(docker-compose restart) with I(restarted)).
+ - Specifying C(absent) is the same as running C(docker-compose down).
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ services:
+ description:
+      - When I(state) is C(present), run C(docker-compose up) (or C(docker-compose stop) with I(stopped), or
+        C(docker-compose restart) with I(restarted)) on a subset of services.
+ - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)).
+ type: list
+ elements: str
+ scale:
+ description:
+      - When I(state) is C(present), scale services. Provide a dictionary of key/value pairs where the key
+ is the name of the service and the value is an integer count for the number of containers.
+ type: dict
+ dependencies:
+ description:
+      - When I(state) is C(present), specify whether or not to include linked services.
+ type: bool
+ default: yes
+ definition:
+ description:
+ - Compose file describing one or more services, networks and volumes.
+ - Mutually exclusive with I(project_src) and I(files).
+ type: dict
+ hostname_check:
+ description:
+ - Whether or not to check the Docker daemon's hostname against the name provided in the client certificate.
+ type: bool
+ default: no
+ recreate:
+ description:
+ - By default containers will be recreated when their configuration differs from the service definition.
+ - Setting to C(never) ignores configuration differences and leaves existing containers unchanged.
+ - Setting to C(always) forces recreation of all existing containers.
+ type: str
+ default: smart
+ choices:
+ - always
+ - never
+ - smart
+ build:
+ description:
+ - Use with I(state) C(present) to always build images prior to starting the application.
+ - Same as running C(docker-compose build) with the pull option.
+ - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents.
+ - Use the I(nocache) option to ignore the image cache when performing the build.
+ - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ pull:
+ description:
+ - Use with I(state) C(present) to always pull images prior to starting the application.
+ - Same as running C(docker-compose pull).
+ - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never).
+ type: bool
+ default: no
+ nocache:
+ description:
+ - Use with the I(build) option to ignore the cache during the image build process.
+ type: bool
+ default: no
+ remove_images:
+ description:
+ - Use with I(state) C(absent) to remove all images or only local images.
+ type: str
+ choices:
+ - 'all'
+ - 'local'
+ remove_volumes:
+ description:
+ - Use with I(state) C(absent) to remove data volumes.
+ type: bool
+ default: no
+ stopped:
+ description:
+ - Use with I(state) C(present) to stop all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be stopped.
+ type: bool
+ default: no
+ restarted:
+ description:
+ - Use with I(state) C(present) to restart all containers defined in the Compose file.
+ - If I(services) is defined, only the containers listed there will be restarted.
+ type: bool
+ default: no
+ remove_orphans:
+ description:
+ - Remove containers for services not defined in the Compose file.
+ type: bool
+ default: no
+ timeout:
+ description:
+      - Timeout in seconds for container shutdown when attached or when containers are already running.
+ type: int
+ default: 10
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "docker-compose >= 1.7.0"
+ - "Docker API >= 1.20"
+ - "PyYAML >= 3.11"
+'''
+
+EXAMPLES = '''
+# Examples use the django example at https://docs.docker.com/compose/django. Follow it to create the
+# flask directory.
+
+- name: Run using a project directory
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - name: Tear down existing services
+ community.docker.docker_compose:
+ project_src: flask
+ state: absent
+
+ - name: Create and start services
+ community.docker.docker_compose:
+ project_src: flask
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - name: Run `docker-compose up` again
+ community.docker.docker_compose:
+ project_src: flask
+ build: no
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+        that: "not output.changed"
+
+ - name: Stop all services
+ community.docker.docker_compose:
+ project_src: flask
+ build: no
+ stopped: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "not web.flask_web_1.state.running"
+ - "not db.flask_db_1.state.running"
+
+ - name: Restart services
+ community.docker.docker_compose:
+ project_src: flask
+ build: no
+ restarted: yes
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Scale the web service to 2
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.docker.docker_compose:
+ project_src: flask
+ scale:
+ web: 2
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+- name: Run with inline v2 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.docker.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.docker.docker_compose:
+ project_name: flask
+ definition:
+ version: '2'
+ services:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ depends_on:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+
+- name: Run with inline v1 compose
+ hosts: localhost
+ gather_facts: no
+ tasks:
+ - community.docker.docker_compose:
+ project_src: flask
+ state: absent
+
+ - community.docker.docker_compose:
+ project_name: flask
+ definition:
+ db:
+ image: postgres
+ web:
+ build: "{{ playbook_dir }}/flask"
+ command: "python manage.py runserver 0.0.0.0:8000"
+ volumes:
+ - "{{ playbook_dir }}/flask:/code"
+ ports:
+ - "8000:8000"
+ links:
+ - db
+ register: output
+
+ - ansible.builtin.debug:
+ var: output
+
+ - ansible.builtin.assert:
+ that:
+ - "web.flask_web_1.state.running"
+ - "db.flask_db_1.state.running"
+'''
+
+RETURN = '''
+services:
+ description:
+ - A dictionary mapping the service's name to a dictionary of containers.
+ returned: success
+ type: complex
+ contains:
+ container_name:
+ description: Name of the container. Format is C(project_service_#).
+ returned: success
+ type: complex
+ contains:
+ cmd:
+ description: One or more commands to be executed in the container.
+ returned: success
+ type: list
+ elements: str
+ example: ["postgres"]
+ image:
+ description: Name of the image from which the container was built.
+ returned: success
+ type: str
+ example: postgres
+ labels:
+          description: Metadata assigned to the container.
+ returned: success
+ type: dict
+ example: {...}
+ networks:
+          description: Contains a dictionary for each network of which the container is a member.
+ returned: success
+ type: list
+ elements: dict
+ contains:
+ IPAddress:
+ description: The IP address assigned to the container.
+ returned: success
+ type: str
+ example: 172.17.0.2
+ IPPrefixLen:
+ description: Number of bits used by the subnet.
+ returned: success
+ type: int
+ example: 16
+ aliases:
+ description: Aliases assigned to the container by the network.
+ returned: success
+ type: list
+ elements: str
+ example: ['db']
+ globalIPv6:
+ description: IPv6 address assigned to the container.
+ returned: success
+ type: str
+ example: ''
+ globalIPv6PrefixLen:
+ description: IPv6 subnet length.
+ returned: success
+ type: int
+ example: 0
+ links:
+ description: List of container names to which this container is linked.
+ returned: success
+ type: list
+ elements: str
+ example: null
+ macAddress:
+          description: MAC address assigned to the virtual NIC.
+ returned: success
+ type: str
+ example: "02:42:ac:11:00:02"
+ state:
+ description: Information regarding the current disposition of the container.
+ returned: success
+ type: dict
+ contains:
+ running:
+ description: Whether or not the container is up with a running process.
+ returned: success
+ type: bool
+ example: true
+ status:
+ description: Description of the running state.
+ returned: success
+ type: str
+ example: running
+
+actions:
+  description: Provides the actions to be taken on each service as determined by Compose.
+ returned: when in check mode or I(debug) is C(yes)
+ type: complex
+ contains:
+ service_name:
+ description: Name of the service.
+ returned: always
+ type: complex
+ contains:
+ pulled_image:
+ description: Provides image details when a new image is pulled for the service.
+ returned: on image pull
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+ built_image:
+ description: Provides image details when a new image is built for the service.
+ returned: on image build
+ type: complex
+ contains:
+ name:
+ description: name of the image
+ returned: always
+ type: str
+ id:
+ description: image hash
+ returned: always
+ type: str
+
+ action:
+ description: A descriptive name of the action to be performed on the service's containers.
+ returned: always
+ type: list
+ elements: str
+ contains:
+ id:
+ description: the container's long ID
+ returned: always
+ type: str
+ name:
+ description: the container's name
+ returned: always
+ type: str
+ short_id:
+ description: the container's short ID
+ returned: always
+ type: str
+'''
+
+import os
+import re
+import sys
+import tempfile
+import traceback
+from contextlib import contextmanager
+from distutils.version import LooseVersion
+
+try:
+ import yaml
+ HAS_YAML = True
+ HAS_YAML_EXC = None
+except ImportError as dummy:
+ HAS_YAML = False
+ HAS_YAML_EXC = traceback.format_exc()
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+try:
+ from compose import __version__ as compose_version
+ from compose.cli.command import project_from_options
+ from compose.service import NoSuchImageError
+ from compose.cli.main import convergence_strategy_from_opts, build_action_from_opts, image_type_from_opt
+ from compose.const import DEFAULT_TIMEOUT, LABEL_SERVICE, LABEL_PROJECT, LABEL_ONE_OFF
+ HAS_COMPOSE = True
+ HAS_COMPOSE_EXC = None
+ MINIMUM_COMPOSE_VERSION = '1.7.0'
+except ImportError as dummy:
+ HAS_COMPOSE = False
+ HAS_COMPOSE_EXC = traceback.format_exc()
+ DEFAULT_TIMEOUT = 10
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+
+
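+# Maps the module's TLS/authentication parameters to the docker-compose CLI
+# options that project_from_options() understands; consumed by
+# _get_auth_options() below.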
+AUTH_PARAM_MAPPING = {
+ u'docker_host': u'--host',
+ u'tls': u'--tls',
+ u'cacert_path': u'--tlscacert',
+ u'cert_path': u'--tlscert',
+ u'key_path': u'--tlskey',
+ u'tls_verify': u'--tlsverify'
+}
+
+
+@contextmanager
+def stdout_redirector(path_name):
+    old_stdout = sys.stdout
+    fd = open(path_name, 'w')
+    sys.stdout = fd
+    try:
+        yield
+    finally:
+        sys.stdout = old_stdout
+        fd.close()
+
+
+@contextmanager
+def stderr_redirector(path_name):
+    old_stderr = sys.stderr
+    fd = open(path_name, 'w')
+    sys.stderr = fd
+    try:
+        yield
+    finally:
+        sys.stderr = old_stderr
+        fd.close()
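+
+
+# Note: docker-compose writes progress and error messages directly to
+# stdout/stderr. The redirectors above capture that output into temp files so
+# it can be scanned for ERROR:/WARNING: lines (see attempt_extract_errors) and
+# so it does not corrupt the JSON that the module must print on stdout.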
+
+
+def make_redirection_tempfiles():
+ dummy, out_redir_name = tempfile.mkstemp(prefix="ansible")
+ dummy, err_redir_name = tempfile.mkstemp(prefix="ansible")
+ return (out_redir_name, err_redir_name)
+
+
+def cleanup_redirection_tempfiles(out_name, err_name):
+ for i in [out_name, err_name]:
+ os.remove(i)
+
+
+def get_redirected_output(path_name):
+ output = []
+ with open(path_name, 'r') as fd:
+ for line in fd:
+ # strip terminal format/color chars
+ new_line = re.sub(r'\x1b\[.+m', '', line)
+ output.append(new_line)
+ os.remove(path_name)
+ return output
+
+
+def attempt_extract_errors(exc_str, stdout, stderr):
+    errors = [line.strip() for line in stderr if line.strip().startswith('ERROR:')]
+    errors.extend([line.strip() for line in stdout if line.strip().startswith('ERROR:')])
+
+    warnings = [line.strip() for line in stderr if line.strip().startswith('WARNING:')]
+    warnings.extend([line.strip() for line in stdout if line.strip().startswith('WARNING:')])
+
+    # Assume either the exception body (if present) or the last error was the
+    # 'most' fatal.
+
+ if exc_str.strip():
+ msg = exc_str.strip()
+ elif errors:
+ msg = errors[-1].encode('utf-8')
+ else:
+ msg = 'unknown cause'
+
+ return {
+ 'warnings': [w.encode('utf-8') for w in warnings],
+ 'errors': [e.encode('utf-8') for e in errors],
+ 'msg': msg,
+ 'module_stderr': ''.join(stderr),
+ 'module_stdout': ''.join(stdout)
+ }
+
+
+def get_failure_info(exc, out_name, err_name=None, msg_format='%s'):
+ if err_name is None:
+ stderr = []
+ else:
+ stderr = get_redirected_output(err_name)
+ stdout = get_redirected_output(out_name)
+
+ reason = attempt_extract_errors(str(exc), stdout, stderr)
+ reason['msg'] = msg_format % reason['msg']
+ return reason
+
+
+class ContainerManager(DockerBaseClass):
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ self.client = client
+ self.project_src = None
+ self.files = None
+ self.project_name = None
+ self.state = None
+ self.definition = None
+ self.hostname_check = None
+ self.timeout = None
+ self.remove_images = None
+ self.remove_orphans = None
+ self.remove_volumes = None
+ self.stopped = None
+ self.restarted = None
+ self.recreate = None
+ self.build = None
+ self.dependencies = None
+ self.services = None
+ self.scale = None
+ self.debug = None
+ self.pull = None
+ self.nocache = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+ self.check_mode = client.check_mode
+
+ if not self.debug:
+ self.debug = client.module._debug
+
+ self.options = dict()
+ self.options.update(self._get_auth_options())
+ self.options[u'--skip-hostname-check'] = (not self.hostname_check)
+
+ if self.project_name:
+ self.options[u'--project-name'] = self.project_name
+
+ if self.files:
+ self.options[u'--file'] = self.files
+
+ if not HAS_COMPOSE:
+ self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
+ HAS_COMPOSE_EXC)
+
+        if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
+            self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
+                             "Upgrade docker-compose to version %s or newer." %
+                             (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))
+
+ if self.restarted and self.stopped:
+ self.client.fail("Cannot use restarted and stopped at the same time.")
+
+ self.log("options: ")
+ self.log(self.options, pretty_print=True)
+
+ if self.definition:
+ if not HAS_YAML:
+ self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % HAS_YAML_EXC)
+
+ if not self.project_name:
+ self.client.fail("Parameter error - project_name required when providing definition.")
+
+ self.project_src = tempfile.mkdtemp(prefix="ansible")
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ try:
+ self.log('writing: ')
+ self.log(yaml.dump(self.definition, default_flow_style=False))
+ with open(compose_file, 'w') as f:
+ f.write(yaml.dump(self.definition, default_flow_style=False))
+ except Exception as exc:
+ self.client.fail("Error writing to %s - %s" % (compose_file, str(exc)))
+ else:
+ if not self.project_src:
+ self.client.fail("Parameter error - project_src required.")
+
+ try:
+ self.log("project_src: %s" % self.project_src)
+ self.project = project_from_options(self.project_src, self.options)
+ except Exception as exc:
+ self.client.fail("Configuration error - %s" % str(exc))
+
+ def exec_module(self):
+ result = dict()
+
+ if self.state == 'present':
+ result = self.cmd_up()
+ elif self.state == 'absent':
+ result = self.cmd_down()
+
+ if self.definition:
+ compose_file = os.path.join(self.project_src, "docker-compose.yml")
+ self.log("removing %s" % compose_file)
+ os.remove(compose_file)
+ self.log("removing %s" % self.project_src)
+ os.rmdir(self.project_src)
+
+ if not self.check_mode and not self.debug and result.get('actions'):
+ result.pop('actions')
+
+ return result
+
+ def _get_auth_options(self):
+ options = dict()
+ for key, value in self.client.auth_params.items():
+ if value is not None:
+ option = AUTH_PARAM_MAPPING.get(key)
+ if option:
+ options[option] = value
+ return options
+
+ def cmd_up(self):
+
+ start_deps = self.dependencies
+ service_names = self.services
+ detached = True
+ result = dict(changed=False, actions=[], services=dict())
+
+ up_options = {
+ u'--no-recreate': False,
+ u'--build': False,
+ u'--no-build': False,
+ u'--no-deps': False,
+ u'--force-recreate': False,
+ }
+
+ if self.recreate == 'never':
+ up_options[u'--no-recreate'] = True
+ elif self.recreate == 'always':
+ up_options[u'--force-recreate'] = True
+
+ if self.remove_orphans:
+ up_options[u'--remove-orphans'] = True
+
+ converge = convergence_strategy_from_opts(up_options)
+ self.log("convergence strategy: %s" % converge)
+
+ if self.pull:
+ pull_output = self.cmd_pull()
+ result['changed'] = pull_output['changed']
+ result['actions'] += pull_output['actions']
+
+ if self.build:
+ build_output = self.cmd_build()
+ result['changed'] = build_output['changed']
+ result['actions'] += build_output['actions']
+
+ if self.remove_orphans:
+ containers = self.client.containers(
+ filters={
+ 'label': [
+ '{0}={1}'.format(LABEL_PROJECT, self.project.name),
+ '{0}={1}'.format(LABEL_ONE_OFF, "False")
+ ],
+ }
+ )
+
+ orphans = []
+ for container in containers:
+ service_name = container.get('Labels', {}).get(LABEL_SERVICE)
+ if service_name not in self.project.service_names:
+ orphans.append(service_name)
+
+ if orphans:
+ result['changed'] = True
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ plan = service.convergence_plan(strategy=converge)
+ if plan.action != 'noop':
+ result['changed'] = True
+ result_action = dict(service=service.name)
+ result_action[plan.action] = []
+ for container in plan.containers:
+ result_action[plan.action].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id,
+ ))
+ result['actions'].append(result_action)
+
+ if not self.check_mode and result['changed'] and not self.stopped:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ do_build = build_action_from_opts(up_options)
+ self.log('Setting do_build to %s' % do_build)
+ self.project.up(
+ service_names=service_names,
+ start_deps=start_deps,
+ strategy=converge,
+ do_build=do_build,
+ detached=detached,
+ remove_orphans=self.remove_orphans,
+ timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error starting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ if self.stopped:
+ stop_output = self.cmd_stop(service_names)
+ result['changed'] = stop_output['changed']
+ result['actions'] += stop_output['actions']
+
+ if self.restarted:
+ restart_output = self.cmd_restart(service_names)
+ result['changed'] = restart_output['changed']
+ result['actions'] += restart_output['actions']
+
+ if self.scale:
+ scale_output = self.cmd_scale()
+ result['changed'] = scale_output['changed']
+ result['actions'] += scale_output['actions']
+
+ for service in self.project.services:
+ service_facts = dict()
+ result['services'][service.name] = service_facts
+ for container in service.containers(stopped=True):
+ inspection = container.inspect()
+ # pare down the inspection data to the most useful bits
+ facts = dict(
+ cmd=[],
+ labels=dict(),
+ image=None,
+ state=dict(
+ running=None,
+ status=None
+ ),
+ networks=dict()
+ )
+ if inspection['Config'].get('Cmd', None) is not None:
+ facts['cmd'] = inspection['Config']['Cmd']
+ if inspection['Config'].get('Labels', None) is not None:
+ facts['labels'] = inspection['Config']['Labels']
+ if inspection['Config'].get('Image', None) is not None:
+ facts['image'] = inspection['Config']['Image']
+ if inspection['State'].get('Running', None) is not None:
+ facts['state']['running'] = inspection['State']['Running']
+ if inspection['State'].get('Status', None) is not None:
+ facts['state']['status'] = inspection['State']['Status']
+
+ if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
+ networks = inspection['NetworkSettings']['Networks']
+ for key in networks:
+ facts['networks'][key] = dict(
+ aliases=[],
+ globalIPv6=None,
+ globalIPv6PrefixLen=0,
+ IPAddress=None,
+ IPPrefixLen=0,
+ links=None,
+ macAddress=None,
+ )
+ if networks[key].get('Aliases', None) is not None:
+ facts['networks'][key]['aliases'] = networks[key]['Aliases']
+ if networks[key].get('GlobalIPv6Address', None) is not None:
+ facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
+ if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
+ facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
+ if networks[key].get('IPAddress', None) is not None:
+ facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
+ if networks[key].get('IPPrefixLen', None) is not None:
+ facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
+ if networks[key].get('Links', None) is not None:
+ facts['networks'][key]['links'] = networks[key]['Links']
+ if networks[key].get('MacAddress', None) is not None:
+ facts['networks'][key]['macAddress'] = networks[key]['MacAddress']
+
+ service_facts[container.name] = facts
+
+ return result
+
+ def cmd_pull(self):
+ result = dict(
+ changed=False,
+ actions=[],
+ )
+
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if 'image' not in service.options:
+ continue
+
+ self.log('Pulling image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # pull the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.pull(ignore_pull_failures=False)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: pull failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+ # store the new image ID
+ new_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ new_image_id = image['Id']
+ except NoSuchImageError as exc:
+ self.client.fail("Error: service image lookup failed after pull - %s" % str(exc))
+
+ if new_image_id != old_image_id:
+ # if a new image was pulled
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ pulled_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_build(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ if not self.check_mode:
+ for service in self.project.get_services(self.services, include_deps=False):
+ if service.can_be_built():
+ self.log('Building image for service %s' % service.name)
+ # store the existing image ID
+ old_image_id = ''
+ try:
+ image = service.image()
+ if image and image.get('Id'):
+ old_image_id = image['Id']
+ except NoSuchImageError:
+ pass
+ except Exception as exc:
+ self.client.fail("Error: service image lookup failed - %s" % str(exc))
+
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ # build the image
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ new_image_id = service.build(pull=self.pull, no_cache=self.nocache)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error: build failed with %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+
+                    if new_image_id != old_image_id:
+ # if a new image was built
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ built_image=dict(
+ name=service.image_name,
+ id=new_image_id
+ )
+ ))
+ return result
+
+ def cmd_down(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ containers = service.containers(stopped=True)
+            if containers:
+ result['changed'] = True
+ result['actions'].append(dict(
+ service=service.name,
+ deleted=[container.name for container in containers]
+ ))
+ if not self.check_mode and result['changed']:
+ image_type = image_type_from_opt('--rmi', self.remove_images)
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.down(image_type, self.remove_volumes, self.remove_orphans)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project - %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_stop(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ stop=[]
+ )
+ for container in service.containers(stopped=False):
+ result['changed'] = True
+ service_res['stop'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.stop(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error stopping project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_restart(self, service_names):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+
+ for service in self.project.services:
+ if not service_names or service.name in service_names:
+ service_res = dict(
+ service=service.name,
+ restart=[]
+ )
+ for container in service.containers(stopped=True):
+ result['changed'] = True
+ service_res['restart'].append(dict(
+ id=container.id,
+ name=container.name,
+ short_id=container.short_id
+ ))
+ result['actions'].append(service_res)
+
+ if not self.check_mode and result['changed']:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ self.project.restart(service_names=service_names, timeout=self.timeout)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error restarting project %s")
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ return result
+
+ def cmd_scale(self):
+ result = dict(
+ changed=False,
+ actions=[]
+ )
+ for service in self.project.services:
+ if service.name in self.scale:
+ service_res = dict(
+ service=service.name,
+ scale=0
+ )
+ containers = service.containers(stopped=True)
+ scale = self.parse_scale(service.name)
+ if len(containers) != scale:
+ result['changed'] = True
+ service_res['scale'] = scale - len(containers)
+ if not self.check_mode:
+ out_redir_name, err_redir_name = make_redirection_tempfiles()
+ try:
+ with stdout_redirector(out_redir_name):
+ with stderr_redirector(err_redir_name):
+ service.scale(scale)
+ except Exception as exc:
+ fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
+ msg_format="Error scaling {0} - %s".format(service.name))
+ self.client.fail(**fail_reason)
+ else:
+ cleanup_redirection_tempfiles(out_redir_name, err_redir_name)
+ result['actions'].append(service_res)
+ return result
+
+    def parse_scale(self, service_name):
+        try:
+            return int(self.scale[service_name])
+        except (TypeError, ValueError):
+            self.client.fail("Error scaling %s - expected int, got %s" %
+                             (service_name, str(type(self.scale[service_name]))))
+
+
+def main():
+ argument_spec = dict(
+ project_src=dict(type='path'),
+ project_name=dict(type='str',),
+ files=dict(type='list', elements='path'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ definition=dict(type='dict'),
+ hostname_check=dict(type='bool', default=False),
+ recreate=dict(type='str', default='smart', choices=['always', 'never', 'smart']),
+ build=dict(type='bool', default=False),
+ remove_images=dict(type='str', choices=['all', 'local']),
+ remove_volumes=dict(type='bool', default=False),
+ remove_orphans=dict(type='bool', default=False),
+ stopped=dict(type='bool', default=False),
+ restarted=dict(type='bool', default=False),
+ scale=dict(type='dict'),
+ services=dict(type='list', elements='str'),
+ dependencies=dict(type='bool', default=True),
+ pull=dict(type='bool', default=False),
+ nocache=dict(type='bool', default=False),
+ debug=dict(type='bool', default=False),
+ timeout=dict(type='int', default=DEFAULT_TIMEOUT)
+ )
+
+ mutually_exclusive = [
+ ('definition', 'project_src'),
+ ('definition', 'files')
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ mutually_exclusive=mutually_exclusive,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+
+ try:
+ result = ContainerManager(client).exec_module()
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_config.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_config.py
new file mode 100644
index 00000000..3791dda4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_config.py
@@ -0,0 +1,299 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_config
+
+short_description: Manage docker configs.
+
+
+description:
+ - Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm).
+  - Adds to the metadata of new configs 'ansible_key', a hash of the config data, which is then used
+    in future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated
+    unless the I(force) option is set.
+ - Updates to configs are performed by removing the config and creating it again.
+options:
+ data:
+ description:
+ - The value of the config. Required when state is C(present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: no
+ labels:
+ description:
+      - "A map of key:value metadata, where both the I(key) and I(value) are expected to be strings."
+      - If new metadata is provided, or existing metadata is modified, the config will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing config.
+ - If C(true), an existing config will be replaced, even if it has not been changed.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the config.
+ type: str
+ required: yes
+ state:
+ description:
+ - Set to C(present), if the config should exist, and C(absent), if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_2_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.6.0"
+ - "Docker API >= 1.30"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+ - John Hu (@ushuz)
+'''
+
+EXAMPLES = '''
+
+- name: Create config foo (from a file on the control machine)
+ community.docker.docker_config:
+ name: foo
+    # If the file is JSON or binary, Ansible might modify it (because
+    # it is first decoded and later re-encoded). Base64-encoding the
+    # file directly after reading it prevents this from happening.
+ data: "{{ lookup('file', '/path/to/config/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Change the config data
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the config
+ two: '2'
+ state: present
+
+- name: No change
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing config
+ state: present
+
+- name: Update an existing label
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the config
+ one: '1'
+ state: present
+
+- name: Force the (re-)creation of the config
+ community.docker.docker_config:
+ name: foo
+ data: Goodnight everyone!
+ force: yes
+ state: present
+
+- name: Remove config foo
+ community.docker.docker_config:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+config_id:
+ description:
+ - The ID assigned by Docker to the config object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ compare_generic,
+ RequestException,
+)
+from ansible.module_utils._text import to_native, to_bytes
+
+
+class ConfigManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ConfigManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.data_key = None
+
+ def __call__(self):
+ if self.state == 'present':
+ self.data_key = hashlib.sha224(self.data).hexdigest()
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
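+
+    # Idempotency sketch: for state=present a SHA-224 digest of the config data
+    # is stored in the 'ansible_key' label when the config is created; on later
+    # runs present() compares that label against the freshly computed digest and
+    # only removes/recreates the config when it differs (or when force is set).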
+
+ def get_config(self):
+ ''' Find an existing config. '''
+ try:
+ configs = self.client.configs(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing config %s: %s" % (self.name, to_native(exc)))
+
+ for config in configs:
+ if config['Spec']['Name'] == self.name:
+ return config
+ return None
+
+ def create_config(self):
+ ''' Create a new config '''
+ config_id = None
+        # We can't see the data after creation, so add a label we can use for an idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ config_id = self.client.create_config(self.name, self.data, labels=labels)
+ except APIError as exc:
+ self.client.fail("Error creating config: %s" % to_native(exc))
+
+ if isinstance(config_id, dict):
+ config_id = config_id['ID']
+
+ return config_id
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the config '''
+ config = self.get_config()
+ if config:
+ self.results['config_id'] = config['ID']
+ data_changed = False
+ attrs = config.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the config
+ self.absent()
+ config_id = self.create_config()
+ self.results['changed'] = True
+ self.results['config_id'] = config_id
+ else:
+ self.results['changed'] = True
+ self.results['config_id'] = self.create_config()
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the config '''
+ config = self.get_config()
+ if config:
+ try:
+ if not self.check_mode:
+ self.client.remove_config(config['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing config %s: %s" % (self.name, to_native(exc)))
+ self.results['changed'] = True
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str'),
+ data_is_b64=dict(type='bool', default=False),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False)
+ )
+
+ required_if = [
+ ('state', 'present', ['data'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='2.6.0',
+ min_docker_api_version='1.30',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ ConfigManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container.py
new file mode 100644
index 00000000..033b5c72
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container.py
@@ -0,0 +1,3591 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container
+
+short_description: Manage docker containers
+
+description:
+ - Manage the life cycle of docker containers.
+ - Supports check mode. Run with C(--check) and C(--diff) to view config difference and list of actions to be taken.
+
+
+notes:
+ - For most config changes, the container needs to be recreated, i.e. the existing container has to be destroyed and
+ a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to
+ prevent this.
+ - If the module needs to recreate the container, it will only use the options provided to the module to create the
+ new container (except I(image)). Therefore, always specify *all* options relevant to the container.
+ - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected.
+ Please note that several options have default values; if the container to be restarted uses different values for
+ these options, it will be recreated instead. The options with default values which can cause this are I(auto_remove),
+ I(detach), I(init), I(interactive), I(memory), I(paused), I(privileged), I(read_only) and I(tty). This behavior
+ can be changed by setting I(container_default_behavior) to C(no_defaults), which will be the default value from
+ community.docker 2.0.0 on.
+
+options:
+ auto_remove:
+ description:
+ - Enable auto-removal of the container on daemon side when the container's process exits.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ blkio_weight:
+ description:
+ - Block IO (relative weight), between 10 and 1000.
+ type: int
+ capabilities:
+ description:
+ - List of capabilities to add to the container.
+ type: list
+ elements: str
+ cap_drop:
+ description:
+ - List of capabilities to drop from the container.
+ type: list
+ elements: str
+ cgroup_parent:
+ description:
+ - Specify the parent cgroup for the container.
+ type: str
+ version_added: 1.1.0
+ cleanup:
+ description:
+ - Use with I(detach=false) to remove the container after successful execution.
+ type: bool
+ default: no
+ command:
+ description:
+ - Command to execute when the container starts. A command may be either a string or a list.
+ - Prior to version 2.4, strings were split on commas.
+ type: raw
+ comparisons:
+ description:
+      - Allows specifying how properties of existing containers are compared with
+ module options to decide whether the container should be recreated / updated
+ or not.
+ - Only options which correspond to the state of a container as handled by the
+ Docker daemon can be specified, as well as C(networks).
+ - Must be a dictionary specifying for an option one of the keys C(strict), C(ignore)
+ and C(allow_more_present).
+ - If C(strict) is specified, values are tested for equality, and changes always
+ result in updating or restarting. If C(ignore) is specified, changes are ignored.
+ - C(allow_more_present) is allowed only for lists, sets and dicts. If it is
+ specified for lists or sets, the container will only be updated or restarted if
+ the module option contains a value which is not present in the container's
+ options. If the option is specified for a dict, the container will only be updated
+        container's option, or if the value of a key that is present differs.
+ container's option, or if the value of a key present differs.
+ - The wildcard option C(*) can be used to set one of the default values C(strict)
+ or C(ignore) to *all* comparisons which are not explicitly set to other values.
+ - See the examples for details.
+ type: dict
+ container_default_behavior:
+ description:
+ - Various module options used to have default values. This causes problems with
+ containers which use different values for these options.
+ - The default value is C(compatibility), which will ensure that the default values
+ are used when the values are not explicitly specified by the user.
+ - From community.docker 2.0.0 on, the default value will switch to C(no_defaults). To avoid
+ deprecation warnings, please set I(container_default_behavior) to an explicit
+ value.
+ - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory),
+ I(paused), I(privileged), I(read_only) and I(tty) options.
+ type: str
+ choices:
+ - compatibility
+ - no_defaults
+ cpu_period:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) period.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpu_quota:
+ description:
+ - Limit CPU CFS (Completely Fair Scheduler) quota.
+ - See I(cpus) for an easier to use alternative.
+ type: int
+ cpus:
+ description:
+ - Specify how much of the available CPU resources a container can use.
+ - A value of C(1.5) means that at most one and a half CPU (core) will be used.
+ type: float
+ cpuset_cpus:
+ description:
+      - CPUs in which to allow execution, for example C(1,3) or C(1-3).
+ type: str
+ cpuset_mems:
+ description:
+      - Memory nodes (MEMs) in which to allow execution, for example C(0-3) or C(0,1).
+ type: str
+ cpu_shares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ default_host_ip:
+ description:
+ - Define the default host IP to use.
+ - Must be an empty string, an IPv4 address, or an IPv6 address.
+      - With Docker 20.10.2 or newer, this should be set to an empty string (C("")) to avoid
+        port bindings without an explicit IP address being bound to IPv4 only.
+        See U(https://github.com/ansible-collections/community.docker/issues/70) for details.
+ - By default, the module will try to auto-detect this value from the C(bridge) network's
+ C(com.docker.network.bridge.host_binding_ipv4) option. If it cannot auto-detect it, it
+ will fall back to C(0.0.0.0).
+ type: str
+ version_added: 1.2.0
+ detach:
+ description:
+ - Enable detached mode to leave the container running in background.
+ - If disabled, the task will reflect the status of the container run (failed if the command failed).
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(yes).
+ type: bool
+ devices:
+ description:
+ - List of host device bindings to add to the container.
+ - "Each binding is a mapping expressed in the format C(<path_on_host>:<path_in_container>:<cgroup_permissions>)."
+ type: list
+ elements: str
+ device_read_bps:
+ description:
+ - "List of device path and read rate (bytes per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: yes
+ device_write_bps:
+ description:
+ - "List of device and write rate (bytes per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+          - "Device write limit in format C(<number>[<unit>])."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ required: yes
+ device_read_iops:
+ description:
+ - "List of device and read rate (IO per second) from device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+ - "Device read limit."
+ - "Must be a positive integer."
+ type: int
+ required: yes
+ device_write_iops:
+ description:
+ - "List of device and write rate (IO per second) to device."
+ type: list
+ elements: dict
+ suboptions:
+ path:
+ description:
+ - Device path in the container.
+ type: str
+ required: yes
+ rate:
+ description:
+          - "Device write limit."
+ - "Must be a positive integer."
+ type: int
+ required: yes
+ device_requests:
+ description:
+      - Allows requesting additional resources, such as GPUs.
+ type: list
+ elements: dict
+ suboptions:
+ capabilities:
+ description:
+ - List of lists of strings to request capabilities.
+ - The top-level list entries are combined by OR, and for every list entry,
+ the entries in the list it contains are combined by AND.
+ - The driver tries to satisfy one of the sub-lists.
+ - Available capabilities for the C(nvidia) driver can be found at
+ U(https://github.com/NVIDIA/nvidia-container-runtime).
+ type: list
+ elements: list
+ count:
+ description:
+        - Number of devices to request.
+ - Set to C(-1) to request all available devices.
+ type: int
+ device_ids:
+ description:
+ - List of device IDs.
+ type: list
+ elements: str
+ driver:
+ description:
+ - Which driver to use for this device.
+ type: str
+ options:
+ description:
+ - Driver-specific options.
+ type: dict
+ version_added: 0.1.0
+ dns_opts:
+ description:
+ - List of DNS options.
+ type: list
+ elements: str
+ dns_servers:
+ description:
+ - List of custom DNS servers.
+ type: list
+ elements: str
+ dns_search_domains:
+ description:
+ - List of custom DNS search domains.
+ type: list
+ elements: str
+ domainname:
+ description:
+ - Container domainname.
+ type: str
+ env:
+ description:
+ - Dictionary of key,value pairs.
+ - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (e.g. C("true")) in order to avoid data loss.
+ type: dict
+ env_file:
+ description:
+ - Path to a file, present on the target, containing environment variables I(FOO=BAR).
+      - If a variable is also present in I(env), the I(env) value will override it.
+ type: path
+ entrypoint:
+ description:
+ - Command that overwrites the default C(ENTRYPOINT) of the image.
+ type: list
+ elements: str
+ etc_hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's C(/etc/hosts) file.
+ type: dict
+ exposed_ports:
+ description:
+ - List of additional container ports which informs Docker that the container
+ listens on the specified network ports at runtime.
+ - If the port is already exposed using C(EXPOSE) in a Dockerfile, it does not
+ need to be exposed again.
+ type: list
+ elements: str
+ aliases:
+ - exposed
+ - expose
+ force_kill:
+ description:
+ - Use the kill command when stopping a running container.
+ type: bool
+ default: no
+ aliases:
+ - forcekill
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+      - Configure a check that is run to determine whether or not the container is "healthy".
+ - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work."
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+ that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ - The default used by the Docker daemon is C(30s).
+ type: str
+ retries:
+ description:
+ - Consecutive number of failures needed to report unhealthy.
+ - The default used by the Docker daemon is C(3).
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ - The default used by the Docker daemon is C(0s).
+ type: str
+ hostname:
+ description:
+ - The container's hostname.
+ type: str
+ ignore_image:
+ description:
+ - When I(state) is C(present) or C(started), the module compares the configuration of an existing
+ container to requested configuration. The evaluation includes the image version. If the image
+ version in the registry does not match the container, the container will be recreated. You can
+ stop this behavior by setting I(ignore_image) to C(True).
+ - "*Warning:* This option is ignored if C(image: ignore) or C(*: ignore) is specified in the
+ I(comparisons) option."
+ type: bool
+ default: no
+ image:
+ description:
+      - Repository path and tag used to create the container. If an image is not found or I(pull) is C(true), the image
+ will be pulled from the registry. If no tag is included, C(latest) will be used.
+ - Can also be an image ID. If this is the case, the image is assumed to be available locally.
+ The I(pull) option is ignored for this case.
+ type: str
+ init:
+ description:
+ - Run an init inside the container that forwards signals and reaps processes.
+ - This option requires Docker API >= 1.25.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ interactive:
+ description:
+ - Keep stdin open after a container is launched, even if not attached.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ ipc_mode:
+ description:
+ - Set the IPC mode for the container.
+ - Can be one of C(container:<name|id>) to reuse another container's IPC namespace or C(host) to use
+ the host's IPC namespace within the container.
+ type: str
+ keep_volumes:
+ description:
+ - Retain anonymous volumes associated with a removed container.
+ type: bool
+ default: yes
+ kill_signal:
+ description:
+ - Override default signal used to kill a running container.
+ type: str
+ kernel_memory:
+ description:
+ - "Kernel memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ type: dict
+ links:
+ description:
+ - List of name aliases for linked containers in the format C(container_name:alias).
+      - Setting this will force the container to be restarted.
+ type: list
+ elements: str
+ log_driver:
+ description:
+ - Specify the logging driver. Docker uses C(json-file) by default.
+ - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices.
+ type: str
+ log_options:
+ description:
+ - Dictionary of options specific to the chosen I(log_driver).
+ - See U(https://docs.docker.com/engine/admin/logging/overview/) for details.
+ type: dict
+ aliases:
+ - log_opt
+ mac_address:
+ description:
+ - Container MAC address (e.g. 92:d0:c6:0a:29:33).
+ type: str
+ memory:
+ description:
+ - "Memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C("0").
+ type: str
+ memory_reservation:
+ description:
+ - "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swap:
+ description:
+ - "Total memory limit (memory + swap) in format C(<number>[<unit>]).
+ Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B),
+ C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes.
+ type: str
+ memory_swappiness:
+ description:
+ - Tune a container's memory swappiness behavior. Accepts an integer between 0 and 100.
+      - If not set, the value will remain the same if the container exists and will be inherited
+        from the host machine if it is (re-)created.
+ type: int
+ mounts:
+ type: list
+ elements: dict
+ description:
+ - Specification for mounts to be added to the container. More powerful alternative to I(volumes).
+ suboptions:
+ target:
+ description:
+ - Path inside the container.
+ type: str
+ required: true
+ source:
+ description:
+ - Mount source (e.g. a volume name or a host path).
+ type: str
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows.
+ type: str
+ choices:
+ - bind
+ - npipe
+ - tmpfs
+ - volume
+ default: volume
+ read_only:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ consistency:
+ description:
+ - The consistency requirement for the mount.
+ type: str
+ choices:
+ - cached
+ - consistent
+ - default
+ - delegated
+ propagation:
+ description:
+ - Propagation mode. Only valid for the C(bind) type.
+ type: str
+ choices:
+ - private
+ - rprivate
+ - shared
+ - rshared
+ - slave
+ - rslave
+ no_copy:
+ description:
+ - False if the volume should be populated with the data from the target. Only valid for the C(volume) type.
+ - The default value is C(false).
+ type: bool
+ labels:
+ description:
+ - User-defined name and labels for the volume. Only valid for the C(volume) type.
+ type: dict
+ volume_driver:
+ description:
+ - Specify the volume driver. Only valid for the C(volume) type.
+ - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: str
+ volume_options:
+ description:
+ - Dictionary of options specific to the chosen volume_driver. See
+ L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details.
+ type: dict
+ tmpfs_size:
+ description:
+ - "The size for the tmpfs mount in bytes in format <number>[<unit>]."
+ - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - "Omitting the unit defaults to bytes."
+ type: str
+ tmpfs_mode:
+ description:
+ - The permission mode for the tmpfs mount.
+ type: str
+ name:
+ description:
+ - Assign a name to a new container or match an existing container.
+      - When identifying an existing container, the provided name may be a name or a long or short container ID.
+ type: str
+ required: yes
+ network_mode:
+ description:
+ - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default).
+ - "*Note* that from community.docker 2.0.0 on, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network,
+ the default value for I(network_mode) will be the name of the first network in the I(networks) list. You can prevent this
+ by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if
+ I(network_mode) is not specified."
+ type: str
+ userns_mode:
+ description:
+      - Set the user namespace mode for the container. Currently, the only valid values are C(host) and the empty string.
+ type: str
+ networks:
+ description:
+ - List of networks the container belongs to.
+ - For examples of the data structure and usage see EXAMPLES below.
+ - To remove a container from one or more networks, use the I(purge_networks) option.
+ - If I(networks_cli_compatible) is set to C(false), this will not remove the default network if I(networks) is specified.
+ This is different from the behavior of C(docker run ...). You need to explicitly use I(purge_networks) to enforce
+ the removal of the default network (and all other networks not explicitly mentioned in I(networks)) in that case.
+ type: list
+ elements: dict
+ suboptions:
+ name:
+ description:
+ - The network's name.
+ type: str
+ required: yes
+ ipv4_address:
+ description:
+ - The container's IPv4 address in this network.
+ type: str
+ ipv6_address:
+ description:
+ - The container's IPv6 address in this network.
+ type: str
+ links:
+ description:
+ - A list of containers to link to.
+ type: list
+ elements: str
+ aliases:
+ description:
+ - List of aliases for this container in this network. These names
+ can be used in the network to reach this container.
+ type: list
+ elements: str
+ networks_cli_compatible:
+ description:
+ - "If I(networks_cli_compatible) is set to C(yes) (default), this module will behave as
+ C(docker run --network) and will *not* add the default network if I(networks) is
+ specified. If I(networks) is not specified, the default network will be attached."
+ - "When I(networks_cli_compatible) is set to C(no) and networks are provided to the module
+ via the I(networks) option, the module behaves differently than C(docker run --network):
+ C(docker run --network other) will create a container with network C(other) attached,
+ but the default network not attached. This module with I(networks: {name: other}) will
+ create a container with both C(default) and C(other) attached. If I(purge_networks) is
+ set to C(yes), the C(default) network will be removed afterwards."
+ - "*Note* that docker CLI also sets I(network_mode) to the name of the first network
+ added if C(--network) is specified. For more compatibility with docker CLI, you
+ explicitly have to set I(network_mode) to the name of the first network you're
+ adding. This behavior will change for community.docker 2.0.0: then I(network_mode) will
+ automatically be set to the first network name in I(networks) if I(network_mode)
+ is not specified, I(networks) has at least one entry and I(networks_cli_compatible)
+ is C(true)."
+ type: bool
+ default: true
+ oom_killer:
+ description:
+ - Whether or not to disable OOM Killer for the container.
+ type: bool
+ oom_score_adj:
+ description:
+ - An integer value containing the score given to the container in order to tune
+ OOM killer preferences.
+ type: int
+ output_logs:
+ description:
+ - If set to true, output of the container command will be printed.
+ - Only effective when I(log_driver) is set to C(json-file) or C(journald).
+ type: bool
+ default: no
+ paused:
+ description:
+ - Use with the started state to pause running processes inside the container.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ pid_mode:
+ description:
+ - Set the PID namespace mode for the container.
+ - Note that Docker SDK for Python < 2.0 only supports C(host). Newer versions of the
+ Docker SDK for Python (docker) allow all values supported by the Docker daemon.
+ type: str
+ pids_limit:
+ description:
+ - Set PIDs limit for the container. It accepts an integer value.
+ - Set C(-1) for unlimited PIDs.
+ type: int
+ privileged:
+ description:
+ - Give extended privileges to the container.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ published_ports:
+ description:
+ - List of ports to publish from the container to the host.
+ - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a
+ container port, 9000 is a host port, and 0.0.0.0 is a host interface."
+ - Port ranges can be used for source and destination ports. If two ranges with
+ different lengths are specified, the shorter range will be used.
+ Since community.general 0.2.0, if the source port range has length 1, the port will not be assigned
+ to the first port of the destination range, but to a free port in that range. This is the
+ same behavior as for C(docker) command line utility.
+ - "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are *not* allowed. This
+ is different from the C(docker) command line utility. Use the L(dig lookup,../lookup/dig.html)
+ to resolve hostnames."
+ - A value of C(all) will publish all exposed container ports to random host ports, ignoring
+ any other mappings.
+ - If I(networks) parameter is provided, will inspect each network to see if there exists
+ a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4).
+ If such a network is found, then published ports where no host IP address is specified
+ will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4).
+ Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4)
+ value encountered in the list of I(networks) is the one that will be used.
+ type: list
+ elements: str
+ aliases:
+ - ports
+ pull:
+ description:
+ - If true, always pull the latest version of an image. Otherwise, will only pull an image
+ when missing.
+ - "*Note:* images are only pulled when specified by name. If the image is specified
+        as an image ID (hash), it cannot be pulled."
+ type: bool
+ default: no
+ purge_networks:
+ description:
+ - Remove the container from ALL networks not included in I(networks) parameter.
+ - Any default networks such as C(bridge), if not found in I(networks), will be removed as well.
+ type: bool
+ default: no
+ read_only:
+ description:
+ - Mount the container's root file system as read-only.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ recreate:
+ description:
+ - Use with present and started states to force the re-creation of an existing container.
+ type: bool
+ default: no
+ removal_wait_timeout:
+ description:
+      - When removing an existing container, the docker daemon API call exits after the container
+ is scheduled for removal. Removal usually is very fast, but it can happen that during high I/O
+ load, removal can take longer. By default, the module will wait until the container has been
+ removed before trying to (re-)create it, however long this takes.
+ - By setting this option, the module will wait at most this many seconds for the container to be
+ removed. If the container is still in the removal phase after this many seconds, the module will
+ fail.
+ type: float
+ restart:
+ description:
+ - Use with started state to force a matching container to be stopped and restarted.
+ type: bool
+ default: no
+ restart_policy:
+ description:
+ - Container restart policy.
+      - Place quotes around the C(no) option.
+ type: str
+ choices:
+ - 'no'
+ - 'on-failure'
+ - 'always'
+ - 'unless-stopped'
+ restart_retries:
+ description:
+      - Use with a restart policy to control the maximum number of restart attempts.
+ type: int
+ runtime:
+ description:
+ - Runtime to use for the container.
+ type: str
+ shm_size:
+ description:
+ - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M).
+ type: str
+ security_opts:
+ description:
+ - List of security options in the form of C("label:user:User").
+ type: list
+ elements: str
+ state:
+ description:
+ - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container
+ rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.'
+ - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no
+ container matches the name, a container will be created. If a container matches the name but the provided configuration
+ does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created
+ with the requested config.'
+ - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running
+ state. Use I(restart) to force a matching container to be stopped and restarted.'
+ - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped
+ state.'
+ - To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the
+ image version will be taken into account, you can also use the I(ignore_image) option.
+ - Use the I(recreate) option to always force re-creation of a matching container, even if it is running.
+ - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is
+ C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain anonymous volumes associated with a removed container.
+ type: str
+ default: started
+ choices:
+ - absent
+ - present
+ - stopped
+ - started
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ type: str
+ stop_timeout:
+ description:
+ - Number of seconds to wait for the container to stop before sending C(SIGKILL).
+ When the container is created by this module, its C(StopTimeout) configuration
+ will be set to this value.
+      - When the container is stopped, this value will be used as a timeout for stopping the
+ container. In case the container has a custom C(StopTimeout) configuration,
+ the behavior depends on the version of the docker daemon. New versions of
+ the docker daemon will always use the container's configured C(StopTimeout)
+ value if it has been configured.
+ type: int
+ tmpfs:
+ description:
+ - Mount a tmpfs directory.
+ type: list
+ elements: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+      - If I(container_default_behavior) is set to C(compatibility) (the default value), this
+ option has a default of C(no).
+ type: bool
+ ulimits:
+ description:
+ - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)."
+ type: list
+ elements: str
+ sysctls:
+ description:
+ - Dictionary of key,value pairs.
+ type: dict
+ user:
+ description:
+ - Sets the username or UID used and optionally the groupname or GID for the specified command.
+ - "Can be of the forms C(user), C(user:group), C(uid), C(uid:gid), C(user:gid) or C(uid:group)."
+ type: str
+ uts:
+ description:
+ - Set the UTS namespace mode for the container.
+ type: str
+ volumes:
+ description:
+ - List of volumes to mount within the container.
+ - "Use docker CLI-style syntax: C(/host:/container[:mode])"
+ - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent),
+ C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and
+ C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes."
+ - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume.
+ - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw),
+ C(z), and C(Z)."
+ type: list
+ elements: str
+ volume_driver:
+ description:
+ - The container volume driver.
+ type: str
+ volumes_from:
+ description:
+ - List of container names or IDs to get volumes from.
+ type: list
+ elements: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ type: str
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+ - "Cove Schneider (@cove)"
+ - "Joshua Conner (@joshuaconner)"
+ - "Pavel Antonov (@softzilla)"
+ - "Thomas Steinbach (@ThomasSteinbach)"
+ - "Philippe Jandot (@zfil)"
+ - "Daan Oosterveld (@dusdanig)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Kassian Sun (@kassiansun)"
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Create a data container
+ community.docker.docker_container:
+ name: mydata
+ image: busybox
+ volumes:
+ - /data
+
+- name: Re-create a redis container
+ community.docker.docker_container:
+ name: myredis
+ image: redis
+ command: redis-server --appendonly yes
+ state: present
+ recreate: yes
+ exposed_ports:
+ - 6379
+ volumes_from:
+ - mydata
+
+- name: Restart a container
+ community.docker.docker_container:
+ name: myapplication
+ image: someuser/appimage
+ state: started
+ restart: yes
+ links:
+ - "myredis:aliasedredis"
+ devices:
+ - "/dev/sda:/dev/xvda:rwm"
+ ports:
+ # Publish container port 9000 as host port 8080
+ - "8080:9000"
+ # Publish container UDP port 9001 as host port 8081 on interface 127.0.0.1
+ - "127.0.0.1:8081:9001/udp"
+ # Publish container port 9002 as a random host port
+ - "9002"
+ # Publish container port 9003 as a free host port in range 8000-8100
+ # (the host port will be selected by the Docker daemon)
+ - "8000-8100:9003"
+ # Publish container ports 9010-9020 to host ports 7000-7010
+ - "7000-7010:9010-9020"
+ env:
+ SECRET_KEY: "ssssh"
+ # Values which might be parsed as numbers, booleans or other types by the YAML parser need to be quoted
+ BOOLEAN_KEY: "yes"
+
+- name: Container present
+ community.docker.docker_container:
+ name: mycontainer
+ state: present
+ image: ubuntu:14.04
+ command: sleep infinity
+
+- name: Stop a container
+ community.docker.docker_container:
+ name: mycontainer
+ state: stopped
+
+- name: Start 4 load-balanced containers
+ community.docker.docker_container:
+ name: "container{{ item }}"
+ recreate: yes
+ image: someuser/anotherappimage
+ command: sleep 1d
+ with_sequence: count=4
+
+- name: Remove container
+ community.docker.docker_container:
+ name: ohno
+ state: absent
+
+- name: Syslogging output
+ community.docker.docker_container:
+ name: myservice
+ image: busybox
+ log_driver: syslog
+ log_options:
+ syslog-address: tcp://my-syslog-server:514
+ syslog-facility: daemon
+    # NOTE: in Docker 1.13+ the "syslog-tag" option was renamed to "tag";
+    # for older docker installs, use "syslog-tag" instead
+ tag: myservice
+
+- name: Create db container and connect to network
+ community.docker.docker_container:
+ name: db_test
+ image: "postgres:latest"
+ networks:
+ - name: "{{ docker_network_name }}"
+
+- name: Start container, connect to network and link
+ community.docker.docker_container:
+ name: sleeper
+ image: ubuntu:14.04
+ networks:
+ - name: TestingNet
+ ipv4_address: "172.1.1.100"
+ aliases:
+ - sleepyzz
+ links:
+ - db_test:db
+ - name: TestingNet2
+
+- name: Start a container with a command
+ community.docker.docker_container:
+ name: sleepy
+ image: ubuntu:14.04
+ command: ["sleep", "infinity"]
+
+- name: Add container to networks
+ community.docker.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ ipv4_address: 172.1.1.18
+ links:
+ - sleeper
+ - name: TestingNet2
+ ipv4_address: 172.1.10.20
+
+- name: Update network with aliases
+ community.docker.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet
+ aliases:
+ - sleepyz
+ - zzzz
+
+- name: Remove container from one network
+ community.docker.docker_container:
+ name: sleepy
+ networks:
+ - name: TestingNet2
+ purge_networks: yes
+
+- name: Remove container from all networks
+ community.docker.docker_container:
+ name: sleepy
+ purge_networks: yes
+
+- name: Start a container and use an env file
+ community.docker.docker_container:
+ name: agent
+ image: jenkinsci/ssh-slave
+ env_file: /var/tmp/jenkins/agent.env
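+
+# Illustrative sketch of the mounts option; the volume name "cache_vol" and the
+# paths used here are assumptions for this example only
+- name: Start a container with a named volume mount and a tmpfs mount
+  community.docker.docker_container:
+    name: mounts-demo
+    image: ubuntu:18.04
+    command: sleep infinity
+    mounts:
+      - source: cache_vol
+        target: /cache
+        type: volume
+      - target: /scratch
+        type: tmpfs
+        tmpfs_size: 64M
+        tmpfs_mode: "1777"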
+
+- name: Create a container with limited capabilities
+ community.docker.docker_container:
+ name: sleepy
+ image: ubuntu:16.04
+ command: sleep infinity
+ capabilities:
+ - sys_time
+ cap_drop:
+ - all
+
+- name: Finer container restart/update control
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ volumes:
+ - /tmp:/tmp
+ comparisons:
+ image: ignore # don't restart containers with older versions of the image
+ env: strict # we want precisely this environment
+ volumes: allow_more_present # if there are more volumes, that's ok, as long as `/tmp:/tmp` is there
+
+- name: Finer container restart/update control II
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ env:
+ arg1: "true"
+ arg2: "whatever"
+ comparisons:
+ '*': ignore # by default, ignore *all* options (including image)
+ env: strict # except for environment variables; there, we want to be strict
+
+- name: Start container with healthstatus
+ community.docker.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+      # If this fails or times out, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Remove healthcheck from container
+ community.docker.docker_container:
+ name: nginx-proxy
+ image: nginx:1.13
+ state: started
+ healthcheck:
+ # The "NONE" check needs to be specified
+ test: ["NONE"]
+
+- name: Start container with block device read limit
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_read_bps:
+ # Limit read rate for /dev/sda to 20 mebibytes per second
+ - path: /dev/sda
+ rate: 20M
+ device_read_iops:
+ # Limit read rate for /dev/sdb to 300 IO per second
+ - path: /dev/sdb
+ rate: 300
+
+- name: Start container with GPUs
+ community.docker.docker_container:
+ name: test
+ image: ubuntu:18.04
+ state: started
+ device_requests:
+ - # Add some specific devices to this container
+ device_ids:
+ - '0'
+ - 'GPU-3a23c669-1f69-c64e-cf85-44e9b07e7a2a'
+    - # Add NVIDIA GPUs to this container
+ driver: nvidia
+ count: -1 # this means we want all
+ capabilities:
+ # We have one OR condition: 'gpu' AND 'utility'
+ - - gpu
+ - utility
+ # See https://github.com/NVIDIA/nvidia-container-runtime#supported-driver-capabilities
+ # for a list of capabilities supported by the nvidia driver
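+
+# Illustrative sketch, assuming Docker 20.10.2 or newer: an empty default_host_ip
+# lets port bindings without an explicit IP address bind to both IPv4 and IPv6
+# (see the default_host_ip option above)
+- name: Publish a port on both IPv4 and IPv6
+  community.docker.docker_container:
+    name: web
+    image: nginx:1.13
+    state: started
+    default_host_ip: ''
+    published_ports:
+      - "8080:80"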
+'''
+
+RETURN = '''
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Before 2.3 this was C(ansible_docker_container) but was renamed in 2.3 to C(docker_container) due to
+ conflicts with the connection plugin.
+    - Empty if I(state) is C(absent).
+    - If I(detach) is C(false), will include an C(Output) attribute containing any output from the container run.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+status:
+ description:
+ - In case a container is started without detaching, this contains the exit code of the process in the container.
+ - Before community.docker 1.1.0, this was only returned when non-zero.
+  returned: when I(state) is C(started) and I(detach) is C(false), and when waiting for the container result did not fail
+ type: int
+ sample: 0
+'''
+
+import os
+import re
+import shlex
+import traceback
+from distutils.version import LooseVersion
+from time import sleep
+
+from ansible.module_utils.common.text.formatters import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_native, to_text
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ DifferenceTracker,
+ DockerBaseClass,
+ compare_generic,
+ is_image_name_id,
+ sanitize_result,
+ clean_dict_booleans_for_docker_api,
+ omit_none_from_dict,
+ parse_healthcheck,
+ DOCKER_COMMON_ARGS,
+ RequestException,
+)
+
+try:
+ from docker import utils
+ from ansible_collections.community.docker.plugins.module_utils.common import docker_version
+ if LooseVersion(docker_version) >= LooseVersion('1.10.0'):
+ from docker.types import Ulimit, LogConfig
+ from docker import types as docker_types
+ else:
+ from docker.utils.types import Ulimit, LogConfig
+ from docker.errors import DockerException, APIError, NotFound
+except Exception:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+REQUIRES_CONVERSION_TO_BYTES = [
+ 'kernel_memory',
+ 'memory',
+ 'memory_reservation',
+ 'memory_swap',
+ 'shm_size'
+]
+
+
+def is_volume_permissions(mode):
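+    # Accepts a comma-separated mode string; for example (illustrative),
+    # is_volume_permissions('ro,Z') is True and is_volume_permissions('/data') is False.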
+ for part in mode.split(','):
+ if part not in ('rw', 'ro', 'z', 'Z', 'consistent', 'delegated', 'cached', 'rprivate', 'private', 'rshared', 'shared', 'rslave', 'slave', 'nocopy'):
+ return False
+ return True
+
+
+def parse_port_range(range_or_port, client):
+ '''
+ Parses a string containing either a single port or a range of ports.
+
+ Returns a list of integers for each port in the list.
+ '''
+ if '-' in range_or_port:
+ try:
+ start, end = [int(port) for port in range_or_port.split('-')]
+ except Exception:
+ client.fail('Invalid port range: "{0}"'.format(range_or_port))
+ if end < start:
+ client.fail('Invalid port range: "{0}"'.format(range_or_port))
+ return list(range(start, end + 1))
+ else:
+ try:
+ return [int(range_or_port)]
+ except Exception:
+ client.fail('Invalid port: "{0}"'.format(range_or_port))
+
+
+def split_colon_ipv6(text, client):
+ '''
+ Split string by ':', while keeping IPv6 addresses in square brackets in one component.
+ '''
+ if '[' not in text:
+ return text.split(':')
+ start = 0
+ result = []
+ while start < len(text):
+ i = text.find('[', start)
+ if i < 0:
+ result.extend(text[start:].split(':'))
+ break
+ j = text.find(']', i)
+ if j < 0:
+ client.fail('Cannot find closing "]" in input "{0}" for opening "[" at index {1}!'.format(text, i + 1))
+ result.extend(text[start:i].split(':'))
+ k = text.find(':', j)
+ if k < 0:
+ result[-1] += text[i:]
+ start = len(text)
+ else:
+ result[-1] += text[i:k]
+ if k == len(text):
+ result.append('')
+ break
+ start = k + 1
+ return result
+
+
+class TaskParameters(DockerBaseClass):
+ '''
+ Access and parse module parameters
+ '''
+
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.auto_remove = None
+ self.blkio_weight = None
+ self.capabilities = None
+ self.cap_drop = None
+ self.cleanup = None
+ self.command = None
+ self.cpu_period = None
+ self.cpu_quota = None
+ self.cpus = None
+ self.cpuset_cpus = None
+ self.cpuset_mems = None
+ self.cpu_shares = None
+ self.debug = None
+ self.default_host_ip = None
+ self.detach = None
+ self.devices = None
+ self.device_read_bps = None
+ self.device_write_bps = None
+ self.device_read_iops = None
+ self.device_write_iops = None
+ self.device_requests = None
+ self.dns_servers = None
+ self.dns_opts = None
+ self.dns_search_domains = None
+ self.domainname = None
+ self.env = None
+ self.env_file = None
+ self.entrypoint = None
+ self.etc_hosts = None
+ self.exposed_ports = None
+ self.force_kill = None
+ self.groups = None
+ self.healthcheck = None
+ self.hostname = None
+ self.ignore_image = None
+ self.image = None
+ self.init = None
+ self.interactive = None
+ self.ipc_mode = None
+ self.keep_volumes = None
+ self.kernel_memory = None
+ self.kill_signal = None
+ self.labels = None
+ self.links = None
+ self.log_driver = None
+ self.output_logs = None
+ self.log_options = None
+ self.mac_address = None
+ self.memory = None
+ self.memory_reservation = None
+ self.memory_swap = None
+ self.memory_swappiness = None
+ self.mounts = None
+ self.name = None
+ self.network_mode = None
+ self.userns_mode = None
+ self.networks = None
+ self.networks_cli_compatible = None
+ self.oom_killer = None
+ self.oom_score_adj = None
+ self.paused = None
+ self.pid_mode = None
+ self.pids_limit = None
+ self.privileged = None
+ self.purge_networks = None
+ self.pull = None
+ self.read_only = None
+ self.recreate = None
+ self.removal_wait_timeout = None
+ self.restart = None
+ self.restart_retries = None
+ self.restart_policy = None
+ self.runtime = None
+ self.shm_size = None
+ self.security_opts = None
+ self.state = None
+ self.stop_signal = None
+ self.stop_timeout = None
+ self.tmpfs = None
+ self.tty = None
+ self.user = None
+ self.uts = None
+ self.volumes = None
+ self.volume_binds = dict()
+ self.volumes_from = None
+ self.volume_driver = None
+ self.working_dir = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+ self.comparisons = client.comparisons
+
+ # If state is 'absent', parameters do not have to be parsed or interpreted.
+ # Only the container's name is needed.
+ if self.state == 'absent':
+ return
+
+ if self.default_host_ip:
+ valid_ip = False
+ if re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', self.default_host_ip):
+ valid_ip = True
+ if re.match(r'^\[[0-9a-fA-F:]+\]$', self.default_host_ip):
+ valid_ip = True
+ if re.match(r'^[0-9a-fA-F:]+$', self.default_host_ip):
+ self.default_host_ip = '[{0}]'.format(self.default_host_ip)
+ valid_ip = True
+ if not valid_ip:
+ self.fail('The value of default_host_ip must be an empty string, an IPv4 address, '
+ 'or an IPv6 address. Got "{0}" instead.'.format(self.default_host_ip))
+
+ if self.cpus is not None:
+ self.cpus = int(round(self.cpus * 1E9))
+
+ if self.groups:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ self.groups = [to_text(g, errors='surrogate_or_strict') for g in self.groups]
+
+ for param_name in REQUIRES_CONVERSION_TO_BYTES:
+ if client.module.params.get(param_name):
+ try:
+ setattr(self, param_name, human_to_bytes(client.module.params.get(param_name)))
+ except ValueError as exc:
+ self.fail("Failed to convert %s to bytes: %s" % (param_name, exc))
+
+ self.publish_all_ports = False
+ self.published_ports = self._parse_publish_ports()
+ if self.published_ports == 'all':
+ self.publish_all_ports = True
+ self.published_ports = None
+
+ self.ports = self._parse_exposed_ports(self.published_ports)
+ self.log("expose ports:")
+ self.log(self.ports, pretty_print=True)
+
+ self.links = self._parse_links(self.links)
+
+ if self.volumes:
+ self.volumes = self._expand_host_paths()
+
+ self.tmpfs = self._parse_tmpfs()
+ self.env = self._get_environment()
+ self.ulimits = self._parse_ulimits()
+ self.sysctls = self._parse_sysctls()
+ self.log_config = self._parse_log_config()
+ try:
+ self.healthcheck, self.disable_healthcheck = parse_healthcheck(self.healthcheck)
+ except ValueError as e:
+ self.fail(to_native(e))
+
+ self.exp_links = None
+ self.volume_binds = self._get_volume_binds(self.volumes)
+ self.pid_mode = self._replace_container_names(self.pid_mode)
+ self.ipc_mode = self._replace_container_names(self.ipc_mode)
+ self.network_mode = self._replace_container_names(self.network_mode)
+
+ self.log("volumes:")
+ self.log(self.volumes, pretty_print=True)
+ self.log("volume binds:")
+ self.log(self.volume_binds, pretty_print=True)
+
+ if self.networks:
+ for network in self.networks:
+ network['id'] = self._get_network_id(network['name'])
+ if not network['id']:
+ self.fail("Parameter error: network named %s could not be found. Does it exist?" % network['name'])
+ if network.get('links'):
+ network['links'] = self._parse_links(network['links'])
+
+ if self.mac_address:
+ # Ensure the MAC address uses colons instead of hyphens for later comparison
+ self.mac_address = self.mac_address.replace('-', ':')
+
+ if self.entrypoint:
+ # convert from list to str.
+ self.entrypoint = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.entrypoint])
+
+ if self.command:
+ # convert from list to str
+ if isinstance(self.command, list):
+ self.command = ' '.join([to_text(x, errors='surrogate_or_strict') for x in self.command])
+
+ self.mounts_opt, self.expected_mounts = self._process_mounts()
+
+ self._check_mount_target_collisions()
+
+ for param_name in ["device_read_bps", "device_write_bps"]:
+ if client.module.params.get(param_name):
+ self._process_rate_bps(option=param_name)
+
+ for param_name in ["device_read_iops", "device_write_iops"]:
+ if client.module.params.get(param_name):
+ self._process_rate_iops(option=param_name)
+
+ if self.device_requests:
+ for dr_index, dr in enumerate(self.device_requests):
+ # Make sure that capabilities are lists of lists of strings
+ if dr['capabilities']:
+ for or_index, or_list in enumerate(dr['capabilities']):
+ for and_index, and_term in enumerate(or_list):
+ if not isinstance(and_term, string_types):
+ self.fail(
+ "device_requests[{0}].capabilities[{1}][{2}] is not a string".format(
+ dr_index, or_index, and_index))
+ or_list[and_index] = to_native(and_term)
+ # Make sure that options is a dictionary mapping strings to strings
+ if dr['options']:
+ dr['options'] = clean_dict_booleans_for_docker_api(dr['options'])
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ @property
+ def update_parameters(self):
+ '''
+ Returns parameters used to update a container
+ '''
+
+ update_parameters = dict(
+ blkio_weight='blkio_weight',
+ cpu_period='cpu_period',
+ cpu_quota='cpu_quota',
+ cpu_shares='cpu_shares',
+ cpuset_cpus='cpuset_cpus',
+ cpuset_mems='cpuset_mems',
+ mem_limit='memory',
+ mem_reservation='memory_reservation',
+ memswap_limit='memory_swap',
+ kernel_memory='kernel_memory',
+ restart_policy='restart_policy',
+ )
+
+ result = dict()
+ for key, value in update_parameters.items():
+ if getattr(self, value, None) is not None:
+ if key == 'restart_policy' and self.client.option_minimal_versions[value]['supported']:
+ restart_policy = dict(Name=self.restart_policy,
+ MaximumRetryCount=self.restart_retries)
+ result[key] = restart_policy
+ elif self.client.option_minimal_versions[value]['supported']:
+ result[key] = getattr(self, value)
+ return result
+
+ @property
+ def create_parameters(self):
+ '''
+ Returns parameters used to create a container
+ '''
+ create_params = dict(
+ command='command',
+ domainname='domainname',
+ hostname='hostname',
+ user='user',
+ detach='detach',
+ stdin_open='interactive',
+ tty='tty',
+ ports='ports',
+ environment='env',
+ name='name',
+ entrypoint='entrypoint',
+ mac_address='mac_address',
+ labels='labels',
+ stop_signal='stop_signal',
+ working_dir='working_dir',
+ stop_timeout='stop_timeout',
+ healthcheck='healthcheck',
+ )
+
+ if self.client.docker_py_version < LooseVersion('3.0'):
+ # cpu_shares and volume_driver moved to create_host_config in > 3
+ create_params['cpu_shares'] = 'cpu_shares'
+ create_params['volume_driver'] = 'volume_driver'
+
+ result = dict(
+ host_config=self._host_config(),
+ volumes=self._get_mounts(),
+ )
+
+ for key, value in create_params.items():
+ if getattr(self, value, None) is not None:
+ if self.client.option_minimal_versions[value]['supported']:
+ result[key] = getattr(self, value)
+
+ if self.disable_healthcheck:
+ # Make sure image's health check is overridden
+ result['healthcheck'] = {'test': ['NONE']}
+
+ if self.networks_cli_compatible and self.networks:
+ network = self.networks[0]
+ params = dict()
+ for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
+ if network.get(para):
+ params[para] = network[para]
+ network_config = dict()
+ network_config[network['name']] = self.client.create_endpoint_config(**params)
+ result['networking_config'] = self.client.create_networking_config(network_config)
+ return result
+
+ def _expand_host_paths(self):
+ new_vols = []
+ for vol in self.volumes:
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ if re.match(r'[.~]', host):
+ host = os.path.abspath(os.path.expanduser(host))
+ new_vols.append("%s:%s:%s" % (host, container, mode))
+ continue
+ elif len(parts) == 2:
+ if not is_volume_permissions(parts[1]) and re.match(r'[.~]', parts[0]):
+ host = os.path.abspath(os.path.expanduser(parts[0]))
+ new_vols.append("%s:%s:rw" % (host, parts[1]))
+ continue
+ new_vols.append(vol)
+ return new_vols
+
+ def _get_mounts(self):
+ '''
+ Return a list of container mounts.
+ :return:
+ '''
+ result = []
+ if self.volumes:
+ for vol in self.volumes:
+ # Only pass anonymous volumes to create container
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ continue
+ result.append(vol)
+ self.log("mounts:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _host_config(self):
+ '''
+ Returns parameters used to create a HostConfig object
+ '''
+
+ host_config_params = dict(
+ port_bindings='published_ports',
+ publish_all_ports='publish_all_ports',
+ links='links',
+ privileged='privileged',
+ cgroup_parent='cgroup_parent',
+ dns='dns_servers',
+ dns_opt='dns_opts',
+ dns_search='dns_search_domains',
+ binds='volume_binds',
+ volumes_from='volumes_from',
+ network_mode='network_mode',
+ userns_mode='userns_mode',
+ cap_add='capabilities',
+ cap_drop='cap_drop',
+ extra_hosts='etc_hosts',
+ read_only='read_only',
+ ipc_mode='ipc_mode',
+ security_opt='security_opts',
+ ulimits='ulimits',
+ sysctls='sysctls',
+ log_config='log_config',
+ mem_limit='memory',
+ memswap_limit='memory_swap',
+ mem_swappiness='memory_swappiness',
+ oom_score_adj='oom_score_adj',
+ oom_kill_disable='oom_killer',
+ shm_size='shm_size',
+ group_add='groups',
+ devices='devices',
+ pid_mode='pid_mode',
+ tmpfs='tmpfs',
+ init='init',
+ uts_mode='uts',
+ runtime='runtime',
+ auto_remove='auto_remove',
+ device_read_bps='device_read_bps',
+ device_write_bps='device_write_bps',
+ device_read_iops='device_read_iops',
+ device_write_iops='device_write_iops',
+ pids_limit='pids_limit',
+ mounts='mounts',
+ nano_cpus='cpus',
+ )
+
+ if self.client.docker_py_version >= LooseVersion('1.9') and self.client.docker_api_version >= LooseVersion('1.22'):
+ # blkio_weight can always be updated, but can only be set on creation
+ # when Docker SDK for Python and Docker API are new enough
+ host_config_params['blkio_weight'] = 'blkio_weight'
+
+ if self.client.docker_py_version >= LooseVersion('3.0'):
+ # cpu_shares and volume_driver moved to create_host_config in > 3
+ host_config_params['cpu_shares'] = 'cpu_shares'
+ host_config_params['volume_driver'] = 'volume_driver'
+
+ params = dict()
+ for key, value in host_config_params.items():
+ if getattr(self, value, None) is not None:
+ if self.client.option_minimal_versions[value]['supported']:
+ params[key] = getattr(self, value)
+
+ if self.restart_policy:
+ params['restart_policy'] = dict(Name=self.restart_policy,
+ MaximumRetryCount=self.restart_retries)
+
+ if 'mounts' in params:
+ params['mounts'] = self.mounts_opt
+
+ if self.device_requests is not None:
+ params['device_requests'] = [dict((k, v) for k, v in dr.items() if v is not None) for dr in self.device_requests]
+
+ return self.client.create_host_config(**params)
+
+ def get_default_host_ip(self):
+ if self.default_host_ip is not None:
+ return self.default_host_ip
+ ip = '0.0.0.0'
+ if not self.networks:
+ return ip
+ for net in self.networks:
+ if net.get('name'):
+ try:
+ network = self.client.inspect_network(net['name'])
+ if network.get('Driver') == 'bridge' and \
+ network.get('Options', {}).get('com.docker.network.bridge.host_binding_ipv4'):
+ ip = network['Options']['com.docker.network.bridge.host_binding_ipv4']
+ break
+ except NotFound as nfe:
+ self.client.fail(
+ "Cannot inspect the network '{0}' to determine the default IP: {1}".format(net['name'], nfe),
+ exception=traceback.format_exc()
+ )
+ return ip
+
+ def _parse_publish_ports(self):
+ '''
+ Parse ports from docker CLI syntax
+ '''
+ if self.published_ports is None:
+ return None
+
+ if 'all' in self.published_ports:
+ if len(self.published_ports) > 1:
+ self.client.module.deprecate(
+ 'Specifying "all" in published_ports together with port mappings is not properly '
+ 'supported by the module. The port mappings are currently ignored. Please specify '
+ 'only port mappings, or the value "all". The behavior for mixed usage will either '
+ 'be forbidden in version 2.0.0, or properly handled. In any case, the way you '
+ 'currently use the module will change in a breaking way',
+ collection_name='community.docker', version='2.0.0')
+ return 'all'
+
+ default_ip = self.get_default_host_ip()
+
+ binds = {}
+ for port in self.published_ports:
+ parts = split_colon_ipv6(to_text(port, errors='surrogate_or_strict'), self.client)
+ container_port = parts[-1]
+ protocol = ''
+ if '/' in container_port:
+ container_port, protocol = parts[-1].split('/')
+ container_ports = parse_port_range(container_port, self.client)
+
+ p_len = len(parts)
+ if p_len == 1:
+ port_binds = len(container_ports) * [(default_ip,)]
+ elif p_len == 2:
+ if len(container_ports) == 1:
+ port_binds = [(default_ip, parts[0])]
+ else:
+ port_binds = [(default_ip, port) for port in parse_port_range(parts[0], self.client)]
+ elif p_len == 3:
+ # We only allow IPv4 and IPv6 addresses for the bind address
+ ipaddr = parts[0]
+ if not re.match(r'^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+$', parts[0]) and not re.match(r'^\[[0-9a-fA-F:]+(?:|%[^\]/]+)\]$', ipaddr):
+ self.fail(('Bind addresses for published ports must be IPv4 or IPv6 addresses, not hostnames. '
+ 'Use the dig lookup to resolve hostnames. (Found hostname: {0})').format(ipaddr))
+ if re.match(r'^\[[0-9a-fA-F:]+\]$', ipaddr):
+ ipaddr = ipaddr[1:-1]
+ if parts[1]:
+ if len(container_ports) == 1:
+ port_binds = [(ipaddr, parts[1])]
+ else:
+ port_binds = [(ipaddr, port) for port in parse_port_range(parts[1], self.client)]
+ else:
+ port_binds = len(container_ports) * [(ipaddr,)]
+
+ for bind, container_port in zip(port_binds, container_ports):
+ idx = '{0}/{1}'.format(container_port, protocol) if protocol else container_port
+ if idx in binds:
+ old_bind = binds[idx]
+ if isinstance(old_bind, list):
+ old_bind.append(bind)
+ else:
+ binds[idx] = [old_bind, bind]
+ else:
+ binds[idx] = bind
+ return binds
+
+ def _get_volume_binds(self, volumes):
+ '''
+ Extract host bindings, if any, from list of volume mapping strings.
+
+ :return: dictionary of bind mappings
+ '''
+ result = dict()
+ if volumes:
+ for vol in volumes:
+ host = None
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ elif len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ host, container, mode = (parts + ['rw'])
+ if host is not None:
+ result[host] = dict(
+ bind=container,
+ mode=mode
+ )
+ return result
+
+ def _parse_exposed_ports(self, published_ports):
+ '''
+ Parse exposed ports from docker CLI-style ports syntax.
+ '''
+ exposed = []
+ if self.exposed_ports:
+ for port in self.exposed_ports:
+ port = to_text(port, errors='surrogate_or_strict').strip()
+ protocol = 'tcp'
+ match = re.search(r'(/.+$)', port)
+ if match:
+ protocol = match.group(1).replace('/', '')
+ port = re.sub(r'/.+$', '', port)
+ exposed.append((port, protocol))
+ if published_ports:
+ # Any published port should also be exposed
+ for publish_port in published_ports:
+ match = False
+ if isinstance(publish_port, string_types) and '/' in publish_port:
+ port, protocol = publish_port.split('/')
+ port = int(port)
+ else:
+ protocol = 'tcp'
+ port = int(publish_port)
+ for exposed_port in exposed:
+ if exposed_port[1] != protocol:
+ continue
+ if isinstance(exposed_port[0], string_types) and '-' in exposed_port[0]:
+ start_port, end_port = exposed_port[0].split('-')
+ if int(start_port) <= port <= int(end_port):
+ match = True
+ elif exposed_port[0] == port:
+ match = True
+ if not match:
+ exposed.append((port, protocol))
+ return exposed
+
+ @staticmethod
+ def _parse_links(links):
+ '''
+        Turn links into a list of (name, alias) tuples
+ '''
+ if links is None:
+ return None
+
+ result = []
+ for link in links:
+ parsed_link = link.split(':', 1)
+ if len(parsed_link) == 2:
+ result.append((parsed_link[0], parsed_link[1]))
+ else:
+ result.append((parsed_link[0], parsed_link[0]))
+ return result
+
+ def _parse_ulimits(self):
+ '''
+ Turn ulimits into an array of Ulimit objects
+ '''
+ if self.ulimits is None:
+ return None
+
+ results = []
+ for limit in self.ulimits:
+ limits = dict()
+ pieces = limit.split(':')
+ if len(pieces) >= 2:
+ limits['name'] = pieces[0]
+ limits['soft'] = int(pieces[1])
+ limits['hard'] = int(pieces[1])
+ if len(pieces) == 3:
+ limits['hard'] = int(pieces[2])
+ try:
+ results.append(Ulimit(**limits))
+ except ValueError as exc:
+ self.fail("Error parsing ulimits value %s - %s" % (limit, exc))
+ return results
+
+ def _parse_sysctls(self):
+ '''
+        Return sysctls as a plain hash (dict); no conversion is needed.
+ '''
+ return self.sysctls
+
+ def _parse_log_config(self):
+ '''
+ Create a LogConfig object
+ '''
+ if self.log_driver is None:
+ return None
+
+ options = dict(
+ Type=self.log_driver,
+ Config=dict()
+ )
+
+ if self.log_options is not None:
+ options['Config'] = dict()
+ for k, v in self.log_options.items():
+ if not isinstance(v, string_types):
+ self.client.module.warn(
+ "Non-string value found for log_options option '%s'. The value is automatically converted to '%s'. "
+ "If this is not correct, or you want to avoid such warnings, please quote the value." % (
+ k, to_text(v, errors='surrogate_or_strict'))
+ )
+ v = to_text(v, errors='surrogate_or_strict')
+ self.log_options[k] = v
+ options['Config'][k] = v
+
+ try:
+ return LogConfig(**options)
+ except ValueError as exc:
+ self.fail('Error parsing logging options - %s' % (exc))
+
+ def _parse_tmpfs(self):
+ '''
+        Turn tmpfs entries into a hash mapping mount points to their options
+ '''
+ result = dict()
+ if self.tmpfs is None:
+ return result
+
+ for tmpfs_spec in self.tmpfs:
+ split_spec = tmpfs_spec.split(":", 1)
+ if len(split_spec) > 1:
+ result[split_spec[0]] = split_spec[1]
+ else:
+ result[split_spec[0]] = ""
+ return result
+
+ def _get_environment(self):
+ """
+ If environment file is combined with explicit environment variables, the explicit environment variables
+ take precedence.
+ """
+ final_env = {}
+ if self.env_file:
+ parsed_env_file = utils.parse_env_file(self.env_file)
+ for name, value in parsed_env_file.items():
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ if self.env:
+ for name, value in self.env.items():
+ if not isinstance(value, string_types):
+ self.fail("Non-string value found for env option. Ambiguous env options must be "
+ "wrapped in quotes to avoid them being interpreted. Key: %s" % (name, ))
+ final_env[name] = to_text(value, errors='surrogate_or_strict')
+ return final_env
+
+ def _get_network_id(self, network_name):
+ network_id = None
+ try:
+ for network in self.client.networks(names=[network_name]):
+ if network['Name'] == network_name:
+ network_id = network['Id']
+ break
+ except Exception as exc:
+ self.fail("Error getting network id for %s - %s" % (network_name, to_native(exc)))
+ return network_id
+
+ def _process_mounts(self):
+ if self.mounts is None:
+ return None, None
+ mounts_list = []
+ mounts_expected = []
+ for mount in self.mounts:
+ target = mount['target']
+ datatype = mount['type']
+ mount_dict = dict(mount)
+ # Sanity checks (so we don't wait for docker-py to barf on input)
+ if mount_dict.get('source') is None and datatype != 'tmpfs':
+ self.client.fail('source must be specified for mount "{0}" of type "{1}"'.format(target, datatype))
+ mount_option_types = dict(
+ volume_driver='volume',
+ volume_options='volume',
+ propagation='bind',
+ no_copy='volume',
+ labels='volume',
+ tmpfs_size='tmpfs',
+ tmpfs_mode='tmpfs',
+ )
+ for option, req_datatype in mount_option_types.items():
+ if mount_dict.get(option) is not None and datatype != req_datatype:
+ self.client.fail('{0} cannot be specified for mount "{1}" of type "{2}" (needs type "{3}")'.format(option, target, datatype, req_datatype))
+ # Handle volume_driver and volume_options
+ volume_driver = mount_dict.pop('volume_driver')
+ volume_options = mount_dict.pop('volume_options')
+ if volume_driver:
+ if volume_options:
+ volume_options = clean_dict_booleans_for_docker_api(volume_options)
+ mount_dict['driver_config'] = docker_types.DriverConfig(name=volume_driver, options=volume_options)
+ if mount_dict['labels']:
+ mount_dict['labels'] = clean_dict_booleans_for_docker_api(mount_dict['labels'])
+ if mount_dict.get('tmpfs_size') is not None:
+ try:
+ mount_dict['tmpfs_size'] = human_to_bytes(mount_dict['tmpfs_size'])
+ except ValueError as exc:
+ self.fail('Failed to convert tmpfs_size of mount "{0}" to bytes: {1}'.format(target, exc))
+ if mount_dict.get('tmpfs_mode') is not None:
+ try:
+ mount_dict['tmpfs_mode'] = int(mount_dict['tmpfs_mode'], 8)
+ except Exception as dummy:
+                    self.client.fail('tmpfs_mode of mount "{0}" is not an octal string!'.format(target))
+ # Fill expected mount dict
+ mount_expected = dict(mount)
+ mount_expected['tmpfs_size'] = mount_dict['tmpfs_size']
+ mount_expected['tmpfs_mode'] = mount_dict['tmpfs_mode']
+ # Add result to lists
+ mounts_list.append(docker_types.Mount(**mount_dict))
+ mounts_expected.append(omit_none_from_dict(mount_expected))
+ return mounts_list, mounts_expected
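+ # Illustrative example (sample values): a mount such as
+ # {'target': '/cache', 'type': 'tmpfs', 'tmpfs_size': '64M', 'tmpfs_mode': '1777'}
+ # is normalized above to tmpfs_size=67108864 (bytes) and tmpfs_mode=0o1777
+ # before being passed to docker_types.Mount.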
+
+ def _process_rate_bps(self, option):
+ """
+ Format device_read_bps and device_write_bps option
+ """
+ devices_list = []
+ for v in getattr(self, option):
+ device_dict = dict((x.title(), y) for x, y in v.items())
+ device_dict['Rate'] = human_to_bytes(device_dict['Rate'])
+ devices_list.append(device_dict)
+
+ setattr(self, option, devices_list)
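+ # Illustrative example (sample values): device_read_bps=[{'path': '/dev/sda', 'rate': '20M'}]
+ # becomes [{'Path': '/dev/sda', 'Rate': 20971520}], with the rate converted to bytes.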
+
+ def _process_rate_iops(self, option):
+ """
+ Format device_read_iops and device_write_iops option
+ """
+ devices_list = []
+ for v in getattr(self, option):
+ device_dict = dict((x.title(), y) for x, y in v.items())
+ devices_list.append(device_dict)
+
+ setattr(self, option, devices_list)
+
+ def _replace_container_names(self, mode):
+ """
+ Parse IPC and PID modes. If they contain a container name, replace
+ with the container's ID.
+ """
+ if mode is None or not mode.startswith('container:'):
+ return mode
+ container_name = mode[len('container:'):]
+ # Try to inspect the container to see whether this is an ID or a
+ # name (and in the latter case, retrieve its ID)
+ container = self.client.get_container(container_name)
+ if container is None:
+ # If we can't find the container, issue a warning and continue with
+ # what the user specified.
+ self.client.module.warn('Cannot find a container with name or ID "{0}"'.format(container_name))
+ return mode
+ return 'container:{0}'.format(container['Id'])
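+ # Illustrative example (sample values): pid_mode='container:db' is rewritten to
+ # 'container:<ID of db>' when a container named 'db' can be inspected; otherwise
+ # the original value is kept and a warning is issued.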
+
+ def _check_mount_target_collisions(self):
+ last = dict()
+
+ def f(t, name):
+ if t in last:
+ if name == last[t]:
+ self.client.fail('The mount point "{0}" appears twice in the {1} option'.format(t, name))
+ else:
+ self.client.fail('The mount point "{0}" appears both in the {1} and {2} option'.format(t, name, last[t]))
+ last[t] = name
+
+ if self.expected_mounts:
+ for t in [m['target'] for m in self.expected_mounts]:
+ f(t, 'mounts')
+ if self.volumes:
+ for v in self.volumes:
+ vs = v.split(':')
+ f(vs[0 if len(vs) == 1 else 1], 'volumes')
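+ # Illustrative example (sample values): volumes=['/data:/mnt'] combined with a
+ # mount whose target is '/mnt' fails here, since both would claim the mount
+ # point '/mnt'.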
+
+
+class Container(DockerBaseClass):
+
+ def __init__(self, container, parameters):
+ super(Container, self).__init__()
+ self.raw = container
+ self.Id = None
+ self.container = container
+ if container:
+ self.Id = container['Id']
+ self.Image = container['Image']
+ self.log(self.container, pretty_print=True)
+ self.parameters = parameters
+ self.parameters.expected_links = None
+ self.parameters.expected_ports = None
+ self.parameters.expected_exposed = None
+ self.parameters.expected_volumes = None
+ self.parameters.expected_ulimits = None
+ self.parameters.expected_sysctls = None
+ self.parameters.expected_etc_hosts = None
+ self.parameters.expected_env = None
+ self.parameters.expected_device_requests = None
+ self.parameters_map = dict()
+ self.parameters_map['expected_links'] = 'links'
+ self.parameters_map['expected_ports'] = 'expected_ports'
+ self.parameters_map['expected_exposed'] = 'exposed_ports'
+ self.parameters_map['expected_volumes'] = 'volumes'
+ self.parameters_map['expected_ulimits'] = 'ulimits'
+ self.parameters_map['expected_sysctls'] = 'sysctls'
+ self.parameters_map['expected_etc_hosts'] = 'etc_hosts'
+ self.parameters_map['expected_env'] = 'env'
+ self.parameters_map['expected_entrypoint'] = 'entrypoint'
+ self.parameters_map['expected_binds'] = 'volumes'
+ self.parameters_map['expected_cmd'] = 'command'
+ self.parameters_map['expected_devices'] = 'devices'
+ self.parameters_map['expected_healthcheck'] = 'healthcheck'
+ self.parameters_map['expected_mounts'] = 'mounts'
+ self.parameters_map['expected_device_requests'] = 'device_requests'
+
+ def fail(self, msg):
+ self.parameters.client.fail(msg)
+
+ @property
+ def exists(self):
+ return bool(self.container)
+
+ @property
+ def removing(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Status') == 'removing'
+ return False
+
+ @property
+ def running(self):
+ if self.container and self.container.get('State'):
+ if self.container['State'].get('Running') and not self.container['State'].get('Ghost', False):
+ return True
+ return False
+
+ @property
+ def paused(self):
+ if self.container and self.container.get('State'):
+ return self.container['State'].get('Paused', False)
+ return False
+
+ def _compare(self, a, b, compare):
+ '''
+ Compare values a and b as described in compare.
+ '''
+ return compare_generic(a, b, compare['comparison'], compare['type'])
+
+ def _decode_mounts(self, mounts):
+ if not mounts:
+ return mounts
+ result = []
+ empty_dict = dict()
+ for mount in mounts:
+ res = dict()
+ res['type'] = mount.get('Type')
+ res['source'] = mount.get('Source')
+ res['target'] = mount.get('Target')
+ res['read_only'] = mount.get('ReadOnly', False) # golang's omitempty for bool returns None for False
+ res['consistency'] = mount.get('Consistency')
+ res['propagation'] = mount.get('BindOptions', empty_dict).get('Propagation')
+ res['no_copy'] = mount.get('VolumeOptions', empty_dict).get('NoCopy', False)
+ res['labels'] = mount.get('VolumeOptions', empty_dict).get('Labels', empty_dict)
+ res['volume_driver'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Name')
+ res['volume_options'] = mount.get('VolumeOptions', empty_dict).get('DriverConfig', empty_dict).get('Options', empty_dict)
+ res['tmpfs_size'] = mount.get('TmpfsOptions', empty_dict).get('SizeBytes')
+ res['tmpfs_mode'] = mount.get('TmpfsOptions', empty_dict).get('Mode')
+ result.append(res)
+ return result
+
+ def has_different_configuration(self, image):
+ '''
+ Diff parameters vs existing container config. Returns tuple: (True | False, List of differences)
+ '''
+ self.log('Starting has_different_configuration')
+ self.parameters.expected_entrypoint = self._get_expected_entrypoint()
+ self.parameters.expected_links = self._get_expected_links()
+ self.parameters.expected_ports = self._get_expected_ports()
+ self.parameters.expected_exposed = self._get_expected_exposed(image)
+ self.parameters.expected_volumes = self._get_expected_volumes(image)
+ self.parameters.expected_binds = self._get_expected_binds(image)
+ self.parameters.expected_ulimits = self._get_expected_ulimits(self.parameters.ulimits)
+ self.parameters.expected_sysctls = self._get_expected_sysctls(self.parameters.sysctls)
+ self.parameters.expected_etc_hosts = self._convert_simple_dict_to_list('etc_hosts')
+ self.parameters.expected_env = self._get_expected_env(image)
+ self.parameters.expected_cmd = self._get_expected_cmd()
+ self.parameters.expected_devices = self._get_expected_devices()
+ self.parameters.expected_healthcheck = self._get_expected_healthcheck()
+ self.parameters.expected_device_requests = self._get_expected_device_requests()
+
+ if not self.container.get('HostConfig'):
+ self.fail("has_different_configuration: Error parsing container properties. HostConfig missing.")
+ if not self.container.get('Config'):
+ self.fail("has_different_configuration: Error parsing container properties. Config missing.")
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_different_configuration: Error parsing container properties. NetworkSettings missing.")
+
+ host_config = self.container['HostConfig']
+ log_config = host_config.get('LogConfig', dict())
+ config = self.container['Config']
+ network = self.container['NetworkSettings']
+
+ # The previous version of the docker module ignored the detach state by
+ # assuming if the container was running, it must have been detached.
+ detach = not (config.get('AttachStderr') and config.get('AttachStdout'))
+
+ # "ExposedPorts": null returns None type & causes AttributeError - PR #5517
+ if config.get('ExposedPorts') is not None:
+ expected_exposed = [self._normalize_port(p) for p in config.get('ExposedPorts', dict()).keys()]
+ else:
+ expected_exposed = []
+
+ # Map parameters to container inspect results
+ config_mapping = dict(
+ expected_cmd=config.get('Cmd'),
+ domainname=config.get('Domainname'),
+ hostname=config.get('Hostname'),
+ user=config.get('User'),
+ detach=detach,
+ init=host_config.get('Init'),
+ interactive=config.get('OpenStdin'),
+ capabilities=host_config.get('CapAdd'),
+ cap_drop=host_config.get('CapDrop'),
+ cgroup_parent=host_config.get('CgroupParent'),
+ expected_devices=host_config.get('Devices'),
+ dns_servers=host_config.get('Dns'),
+ dns_opts=host_config.get('DnsOptions'),
+ dns_search_domains=host_config.get('DnsSearch'),
+ expected_env=(config.get('Env') or []),
+ expected_entrypoint=config.get('Entrypoint'),
+ expected_etc_hosts=host_config['ExtraHosts'],
+ expected_exposed=expected_exposed,
+ groups=host_config.get('GroupAdd'),
+ ipc_mode=host_config.get("IpcMode"),
+ labels=config.get('Labels'),
+ expected_links=host_config.get('Links'),
+ mac_address=config.get('MacAddress', network.get('MacAddress')),
+ memory_swappiness=host_config.get('MemorySwappiness'),
+ network_mode=host_config.get('NetworkMode'),
+ userns_mode=host_config.get('UsernsMode'),
+ oom_killer=host_config.get('OomKillDisable'),
+ oom_score_adj=host_config.get('OomScoreAdj'),
+ pid_mode=host_config.get('PidMode'),
+ privileged=host_config.get('Privileged'),
+ expected_ports=host_config.get('PortBindings'),
+ read_only=host_config.get('ReadonlyRootfs'),
+ runtime=host_config.get('Runtime'),
+ shm_size=host_config.get('ShmSize'),
+ security_opts=host_config.get("SecurityOpt"),
+ stop_signal=config.get("StopSignal"),
+ tmpfs=host_config.get('Tmpfs'),
+ tty=config.get('Tty'),
+ expected_ulimits=host_config.get('Ulimits'),
+ expected_sysctls=host_config.get('Sysctls'),
+ uts=host_config.get('UTSMode'),
+ expected_volumes=config.get('Volumes'),
+ expected_binds=host_config.get('Binds'),
+ volume_driver=host_config.get('VolumeDriver'),
+ volumes_from=host_config.get('VolumesFrom'),
+ working_dir=config.get('WorkingDir'),
+ publish_all_ports=host_config.get('PublishAllPorts'),
+ expected_healthcheck=config.get('Healthcheck'),
+ disable_healthcheck=(not config.get('Healthcheck') or config.get('Healthcheck').get('Test') == ['NONE']),
+ device_read_bps=host_config.get('BlkioDeviceReadBps'),
+ device_write_bps=host_config.get('BlkioDeviceWriteBps'),
+ device_read_iops=host_config.get('BlkioDeviceReadIOps'),
+ device_write_iops=host_config.get('BlkioDeviceWriteIOps'),
+ expected_device_requests=host_config.get('DeviceRequests'),
+ pids_limit=host_config.get('PidsLimit'),
+ # According to https://github.com/moby/moby/, support for HostConfig.Mounts
+ # has been included at least since v17.03.0-ce, which has API version 1.26.
+ # The previous tag, v1.9.1, has API version 1.21 and does not have
+ # HostConfig.Mounts. Whether API version 1.25 already supports it is unclear.
+ expected_mounts=self._decode_mounts(host_config.get('Mounts')),
+ cpus=host_config.get('NanoCpus'),
+ )
+ # Options which don't make sense without their accompanying option
+ if self.parameters.log_driver:
+ config_mapping['log_driver'] = log_config.get('Type')
+ config_mapping['log_options'] = log_config.get('Config')
+
+ if self.parameters.client.option_minimal_versions['auto_remove']['supported']:
+ # auto_remove is only supported in Docker SDK for Python >= 2.0.0; unfortunately
+ # it has a default value, which is why it needs special handling here
+ config_mapping['auto_remove'] = host_config.get('AutoRemove')
+
+ if self.parameters.client.option_minimal_versions['stop_timeout']['supported']:
+ # stop_timeout is only supported in Docker SDK for Python >= 2.1. Note that
+ # stop_timeout has a hybrid role, in that it used to be something only used
+ # for stopping containers, and is now also used as a container property.
+ # That's why it needs special handling here.
+ config_mapping['stop_timeout'] = config.get('StopTimeout')
+
+ if self.parameters.client.docker_api_version < LooseVersion('1.22'):
+ # For docker API < 1.22, update_container() is not supported. Thus
+ # we need to handle all limits which are usually handled by
+ # update_container() as configuration changes which require a container
+ # restart.
+ restart_policy = host_config.get('RestartPolicy', dict())
+
+ # Options which don't make sense without their accompanying option
+ if self.parameters.restart_policy:
+ config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
+
+ config_mapping.update(dict(
+ blkio_weight=host_config.get('BlkioWeight'),
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpu_shares=host_config.get('CpuShares'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ restart_policy=restart_policy.get('Name')
+ ))
+
+ differences = DifferenceTracker()
+ for key, value in config_mapping.items():
+ minimal_version = self.parameters.client.option_minimal_versions.get(key, {})
+ if not minimal_version.get('supported', True):
+ continue
+ compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
+ self.log('check differences %s %s vs %s (%s)' % (key, getattr(self.parameters, key), to_text(value, errors='surrogate_or_strict'), compare))
+ if getattr(self.parameters, key, None) is not None:
+ match = self._compare(getattr(self.parameters, key), value, compare)
+
+ if not match:
+ # no match. record the differences
+ p = getattr(self.parameters, key)
+ c = value
+ if compare['type'] == 'set':
+ # Since the order does not matter, sort so that the diff output is better.
+ if p is not None:
+ p = sorted(p)
+ if c is not None:
+ c = sorted(c)
+ elif compare['type'] == 'set(dict)':
+ # Since the order does not matter, sort so that the diff output is better.
+ if key == 'expected_mounts':
+ # For selected values, use one entry as key
+ def sort_key_fn(x):
+ return x['target']
+ else:
+ # We sort the list of dictionaries by using the sorted items of a dict as its key.
+ def sort_key_fn(x):
+ return sorted((a, to_text(b, errors='surrogate_or_strict')) for a, b in x.items())
+ if p is not None:
+ p = sorted(p, key=sort_key_fn)
+ if c is not None:
+ c = sorted(c, key=sort_key_fn)
+ differences.add(key, parameter=p, active=c)
+
+ has_differences = not differences.empty
+ return has_differences, differences
+
+ def has_different_resource_limits(self):
+ '''
+ Diff parameters and container resource limits
+ '''
+ if not self.container.get('HostConfig'):
+ self.fail("limits_differ_from_container: Error parsing container properties. HostConfig missing.")
+ if self.parameters.client.docker_api_version < LooseVersion('1.22'):
+ # update_container() call not supported
+ return False, []
+
+ host_config = self.container['HostConfig']
+
+ restart_policy = host_config.get('RestartPolicy') or dict()
+
+ config_mapping = dict(
+ blkio_weight=host_config.get('BlkioWeight'),
+ cpu_period=host_config.get('CpuPeriod'),
+ cpu_quota=host_config.get('CpuQuota'),
+ cpu_shares=host_config.get('CpuShares'),
+ cpuset_cpus=host_config.get('CpusetCpus'),
+ cpuset_mems=host_config.get('CpusetMems'),
+ kernel_memory=host_config.get("KernelMemory"),
+ memory=host_config.get('Memory'),
+ memory_reservation=host_config.get('MemoryReservation'),
+ memory_swap=host_config.get('MemorySwap'),
+ restart_policy=restart_policy.get('Name')
+ )
+
+ # Options which don't make sense without their accompanying option
+ if self.parameters.restart_policy:
+ config_mapping['restart_retries'] = restart_policy.get('MaximumRetryCount')
+
+ differences = DifferenceTracker()
+ for key, value in config_mapping.items():
+ if getattr(self.parameters, key, None):
+ compare = self.parameters.client.comparisons[self.parameters_map.get(key, key)]
+ match = self._compare(getattr(self.parameters, key), value, compare)
+
+ if not match:
+ # no match. record the differences
+ differences.add(key, parameter=getattr(self.parameters, key), active=value)
+ different = not differences.empty
+ return different, differences
+
+ def has_network_differences(self):
+ '''
+ Check if the container is connected to requested networks with expected options: links, aliases, ipv4, ipv6
+ '''
+ different = False
+ differences = []
+
+ if not self.parameters.networks:
+ return different, differences
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_missing_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings']['Networks']
+ for network in self.parameters.networks:
+ network_info = connected_networks.get(network['name'])
+ if network_info is None:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=None
+ ))
+ else:
+ diff = False
+ network_info_ipam = network_info.get('IPAMConfig') or {}
+ if network.get('ipv4_address') and network['ipv4_address'] != network_info_ipam.get('IPv4Address'):
+ diff = True
+ if network.get('ipv6_address') and network['ipv6_address'] != network_info_ipam.get('IPv6Address'):
+ diff = True
+ if network.get('aliases'):
+ if not compare_generic(network['aliases'], network_info.get('Aliases'), 'allow_more_present', 'set'):
+ diff = True
+ if network.get('links'):
+ expected_links = []
+ for link, alias in network['links']:
+ expected_links.append("%s:%s" % (link, alias))
+ if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'):
+ diff = True
+ if diff:
+ different = True
+ differences.append(dict(
+ parameter=network,
+ container=dict(
+ name=network['name'],
+ ipv4_address=network_info_ipam.get('IPv4Address'),
+ ipv6_address=network_info_ipam.get('IPv6Address'),
+ aliases=network_info.get('Aliases'),
+ links=network_info.get('Links')
+ )
+ ))
+ return different, differences
+
+ def has_extra_networks(self):
+ '''
+ Check if the container is connected to non-requested networks
+ '''
+ extra_networks = []
+ extra = False
+
+ if not self.container.get('NetworkSettings'):
+ self.fail("has_extra_networks: Error parsing container properties. NetworkSettings missing.")
+
+ connected_networks = self.container['NetworkSettings'].get('Networks')
+ if connected_networks:
+ for network, network_config in connected_networks.items():
+ keep = False
+ if self.parameters.networks:
+ for expected_network in self.parameters.networks:
+ if expected_network['name'] == network:
+ keep = True
+ if not keep:
+ extra = True
+ extra_networks.append(dict(name=network, id=network_config['NetworkID']))
+ return extra, extra_networks
+
+ def _get_expected_devices(self):
+ if not self.parameters.devices:
+ return None
+ expected_devices = []
+ for device in self.parameters.devices:
+ parts = device.split(':')
+ if len(parts) == 1:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[0],
+ PathOnHost=parts[0]
+ ))
+ elif len(parts) == 2:
+ expected_devices.append(
+ dict(
+ CgroupPermissions='rwm',
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ )
+ )
+ else:
+ expected_devices.append(
+ dict(
+ CgroupPermissions=parts[2],
+ PathInContainer=parts[1],
+ PathOnHost=parts[0]
+ ))
+ return expected_devices
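+ # Illustrative example (sample values): devices=['/dev/sda:/dev/xvda:r'] maps to
+ # [{'CgroupPermissions': 'r', 'PathInContainer': '/dev/xvda', 'PathOnHost': '/dev/sda'}];
+ # with one or two parts, permissions default to 'rwm'.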
+
+ def _get_expected_entrypoint(self):
+ if not self.parameters.entrypoint:
+ return None
+ return shlex.split(self.parameters.entrypoint)
+
+ def _get_expected_ports(self):
+ if self.parameters.published_ports is None:
+ return None
+ expected_bound_ports = {}
+ for container_port, config in self.parameters.published_ports.items():
+ if isinstance(container_port, int):
+ container_port = "%s/tcp" % container_port
+ if len(config) == 1:
+ if isinstance(config[0], int):
+ expected_bound_ports[container_port] = [{'HostIp': "0.0.0.0", 'HostPort': config[0]}]
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': ""}]
+ elif isinstance(config[0], tuple):
+ expected_bound_ports[container_port] = []
+ for host_ip, host_port in config:
+ expected_bound_ports[container_port].append({'HostIp': host_ip, 'HostPort': to_text(host_port, errors='surrogate_or_strict')})
+ else:
+ expected_bound_ports[container_port] = [{'HostIp': config[0], 'HostPort': to_text(config[1], errors='surrogate_or_strict')}]
+ return expected_bound_ports
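+ # Illustrative example (sample values): published_ports resulting in
+ # {'80/tcp': ('0.0.0.0', 8080)} is expanded to
+ # {'80/tcp': [{'HostIp': '0.0.0.0', 'HostPort': '8080'}]} for comparison with
+ # the container's PortBindings.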
+
+ def _get_expected_links(self):
+ if self.parameters.links is None:
+ return None
+ self.log('parameter links:')
+ self.log(self.parameters.links, pretty_print=True)
+ exp_links = []
+ for link, alias in self.parameters.links:
+ exp_links.append("/%s:%s/%s" % (link, ('/' + self.parameters.name), alias))
+ return exp_links
+
+ def _get_expected_binds(self, image):
+ self.log('_get_expected_binds')
+ image_vols = []
+ if image:
+ image_vols = self._get_image_binds(image[self.parameters.client.image_inspect_source].get('Volumes'))
+ param_vols = []
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ host = None
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ host, container, mode = parts
+ if not is_volume_permissions(mode):
+ self.fail('Found invalid volumes mode: {0}'.format(mode))
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ host, container, mode = parts + ['rw']
+ if host:
+ param_vols.append("%s:%s:%s" % (host, container, mode))
+ result = list(set(image_vols + param_vols))
+ self.log("expected_binds:")
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_expected_device_requests(self):
+ if self.parameters.device_requests is None:
+ return None
+ device_requests = []
+ for dr in self.parameters.device_requests:
+ device_requests.append({
+ 'Driver': dr['driver'],
+ 'Count': dr['count'],
+ 'DeviceIDs': dr['device_ids'],
+ 'Capabilities': dr['capabilities'],
+ 'Options': dr['options'],
+ })
+ return device_requests
+
+ def _get_image_binds(self, volumes):
+ '''
+ Convert array of binds to array of strings with format host_path:container_path:mode
+
+ :param volumes: array of bind dicts
+ :return: array of strings
+ '''
+ results = []
+ if isinstance(volumes, dict):
+ results += self._get_bind_from_dict(volumes)
+ elif isinstance(volumes, list):
+ for vol in volumes:
+ results += self._get_bind_from_dict(vol)
+ return results
+
+ @staticmethod
+ def _get_bind_from_dict(volume_dict):
+ results = []
+ if volume_dict:
+ for host_path, config in volume_dict.items():
+ if isinstance(config, dict) and config.get('bind'):
+ container_path = config.get('bind')
+ mode = config.get('mode', 'rw')
+ results.append("%s:%s:%s" % (host_path, container_path, mode))
+ return results
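+ # Illustrative example (sample values): {'/data': {'bind': '/mnt/data', 'mode': 'ro'}}
+ # is converted to ['/data:/mnt/data:ro']; a missing mode defaults to 'rw'.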
+
+ def _get_expected_volumes(self, image):
+ self.log('_get_expected_volumes')
+ expected_vols = dict()
+ if image and image[self.parameters.client.image_inspect_source].get('Volumes'):
+ expected_vols.update(image[self.parameters.client.image_inspect_source].get('Volumes'))
+
+ if self.parameters.volumes:
+ for vol in self.parameters.volumes:
+ # We only expect anonymous volumes to show up in the list
+ if ':' in vol:
+ parts = vol.split(':')
+ if len(parts) == 3:
+ continue
+ if len(parts) == 2:
+ if not is_volume_permissions(parts[1]):
+ continue
+ expected_vols[vol] = dict()
+
+ if not expected_vols:
+ expected_vols = None
+ self.log("expected_volumes:")
+ self.log(expected_vols, pretty_print=True)
+ return expected_vols
+
+ def _get_expected_env(self, image):
+ self.log('_get_expected_env')
+ expected_env = dict()
+ if image and image[self.parameters.client.image_inspect_source].get('Env'):
+ for env_var in image[self.parameters.client.image_inspect_source]['Env']:
+ parts = env_var.split('=', 1)
+ expected_env[parts[0]] = parts[1]
+ if self.parameters.env:
+ expected_env.update(self.parameters.env)
+ param_env = []
+ for key, value in expected_env.items():
+ param_env.append("%s=%s" % (key, value))
+ return param_env
+
+ def _get_expected_exposed(self, image):
+ self.log('_get_expected_exposed')
+ image_ports = []
+ if image:
+ image_exposed_ports = image[self.parameters.client.image_inspect_source].get('ExposedPorts') or {}
+ image_ports = [self._normalize_port(p) for p in image_exposed_ports.keys()]
+ param_ports = []
+ if self.parameters.ports:
+ param_ports = [to_text(p[0], errors='surrogate_or_strict') + '/' + p[1] for p in self.parameters.ports]
+ result = list(set(image_ports + param_ports))
+ self.log(result, pretty_print=True)
+ return result
+
+ def _get_expected_ulimits(self, config_ulimits):
+ self.log('_get_expected_ulimits')
+ if config_ulimits is None:
+ return None
+ results = []
+ for limit in config_ulimits:
+ results.append(dict(
+ Name=limit.name,
+ Soft=limit.soft,
+ Hard=limit.hard
+ ))
+ return results
+
+ def _get_expected_sysctls(self, config_sysctls):
+ self.log('_get_expected_sysctls')
+ if config_sysctls is None:
+ return None
+ result = dict()
+ for key, value in config_sysctls.items():
+ result[key] = to_text(value, errors='surrogate_or_strict')
+ return result
+
+ def _get_expected_cmd(self):
+ self.log('_get_expected_cmd')
+ if not self.parameters.command:
+ return None
+ return shlex.split(self.parameters.command)
+
+ def _convert_simple_dict_to_list(self, param_name, join_with=':'):
+ if getattr(self.parameters, param_name, None) is None:
+ return None
+ results = []
+ for key, value in getattr(self.parameters, param_name).items():
+ results.append("%s%s%s" % (key, join_with, value))
+ return results
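+ # Illustrative example (sample values): etc_hosts={'db.local': '10.0.0.5'} is
+ # converted to ['db.local:10.0.0.5'], matching the format of HostConfig.ExtraHosts.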
+
+ def _normalize_port(self, port):
+ if '/' not in port:
+ return port + '/tcp'
+ return port
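+ # Illustrative example: '8080' is normalized to '8080/tcp', while '53/udp' is
+ # returned unchanged.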
+
+ def _get_expected_healthcheck(self):
+ self.log('_get_expected_healthcheck')
+ expected_healthcheck = dict()
+
+ if self.parameters.healthcheck:
+ expected_healthcheck.update([(k.title().replace("_", ""), v)
+ for k, v in self.parameters.healthcheck.items()])
+
+ return expected_healthcheck
+
+
+class ContainerManager(DockerBaseClass):
+ '''
+ Perform container management tasks
+ '''
+
+ def __init__(self, client):
+
+ super(ContainerManager, self).__init__()
+
+ if client.module.params.get('log_options') and not client.module.params.get('log_driver'):
+ client.module.warn('log_options is ignored when log_driver is not specified')
+ if client.module.params.get('healthcheck') and not client.module.params.get('healthcheck').get('test'):
+ client.module.warn('healthcheck is ignored when test is not specified')
+ if client.module.params.get('restart_retries') is not None and not client.module.params.get('restart_policy'):
+ client.module.warn('restart_retries is ignored when restart_policy is not specified')
+
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {'changed': False, 'actions': []}
+ self.diff = {}
+ self.diff_tracker = DifferenceTracker()
+ self.facts = {}
+
+ state = self.parameters.state
+ if state in ('stopped', 'started', 'present'):
+ self.present(state)
+ elif state == 'absent':
+ self.absent()
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ if self.client.module._diff or self.parameters.debug:
+ self.diff['before'], self.diff['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff
+
+ if self.facts:
+ self.results['container'] = self.facts
+
+ def wait_for_state(self, container_id, complete_states=None, wait_states=None, accept_removal=False, max_wait=None):
+ delay = 1.0
+ total_wait = 0
+ while True:
+ # Inspect container
+ result = self.client.get_container_by_id(container_id)
+ if result is None:
+ if accept_removal:
+ return
+ msg = 'Encountered vanished container while waiting for container "{0}"'
+ self.fail(msg.format(container_id))
+ # Check container state
+ state = result.get('State', {}).get('Status')
+ if complete_states is not None and state in complete_states:
+ return
+ if wait_states is not None and state not in wait_states:
+ msg = 'Encountered unexpected state "{1}" while waiting for container "{0}"'
+ self.fail(msg.format(container_id, state))
+ # Wait
+ if max_wait is not None:
+ if total_wait > max_wait:
+ msg = 'Timeout of {1} seconds exceeded while waiting for container "{0}"'
+ self.fail(msg.format(container_id, max_wait))
+ if total_wait + delay > max_wait:
+ delay = max_wait - total_wait
+ sleep(delay)
+ total_wait += delay
+ # Exponential backoff, but never wait longer than 10 seconds
+ # (1.1**24 < 10, 1.1**25 > 10, so it will take 25 iterations
+ # until the maximal 10 seconds delay is reached. By then, the
+ # code will have slept for ~1.5 minutes.)
+ delay = min(delay * 1.1, 10)
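+ # Illustrative example (sample values): wait_for_state(container_id,
+ # wait_states=['removing'], accept_removal=True, max_wait=30) polls the container
+ # with delays of 1.0s, 1.1s, 1.21s, ... (capped at 10s) until it disappears,
+ # and fails once the 30 second budget is exceeded.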
+
+ def present(self, state):
+ container = self._get_container(self.parameters.name)
+ was_running = container.running
+ was_paused = container.paused
+ container_created = False
+
+ # If the image parameter was passed then we need to deal with the image
+ # version comparison. Otherwise we handle this depending on whether
+ # the container already runs or not; if it does and needs to be
+ # recreated, we use the existing container's image ID.
+ image = self._get_image()
+ self.log(image, pretty_print=True)
+ if not container.exists or container.removing:
+ # New container
+ if container.removing:
+ self.log('Found container in removal phase')
+ else:
+ self.log('No container found')
+ if not self.parameters.image:
+ self.fail('Cannot create container when image is not specified!')
+ self.diff_tracker.add('exists', parameter=True, active=False)
+ if container.removing and not self.check_mode:
+ # Wait for container to be removed before trying to create it
+ self.wait_for_state(
+ container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
+ new_container = self.container_create(self.parameters.image, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+ container_created = True
+ else:
+ # Existing container
+ different, differences = container.has_different_configuration(image)
+ image_different = False
+ if self.parameters.comparisons['image']['comparison'] == 'strict':
+ image_different = self._image_is_different(image, container)
+ if image_different or different or self.parameters.recreate:
+ self.diff_tracker.merge(differences)
+ self.diff['differences'] = differences.get_legacy_docker_container_diffs()
+ if image_different:
+ self.diff['image_different'] = True
+ self.log("differences")
+ self.log(differences.get_legacy_docker_container_diffs(), pretty_print=True)
+ image_to_use = self.parameters.image
+ if not image_to_use and container and container.Image:
+ image_to_use = container.Image
+ if not image_to_use:
+ self.fail('Cannot recreate container when image is not specified or cannot be extracted from current container!')
+ if container.running:
+ self.container_stop(container.Id)
+ self.container_remove(container.Id)
+ if not self.check_mode:
+ self.wait_for_state(
+ container.Id, wait_states=['removing'], accept_removal=True, max_wait=self.parameters.removal_wait_timeout)
+ new_container = self.container_create(image_to_use, self.parameters.create_parameters)
+ if new_container:
+ container = new_container
+ container_created = True
+
+ if container and container.exists:
+ container = self.update_limits(container)
+ container = self.update_networks(container, container_created)
+
+ if state == 'started' and not container.running:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ container = self.container_start(container.Id)
+ elif state == 'started' and self.parameters.restart:
+ self.diff_tracker.add('running', parameter=True, active=was_running)
+ self.diff_tracker.add('restarted', parameter=True, active=False)
+ container = self.container_restart(container.Id)
+ elif state == 'stopped' and container.running:
+ self.diff_tracker.add('running', parameter=False, active=was_running)
+ self.container_stop(container.Id)
+ container = self._get_container(container.Id)
+
+ if state == 'started' and self.parameters.paused is not None and container.paused != self.parameters.paused:
+ self.diff_tracker.add('paused', parameter=self.parameters.paused, active=was_paused)
+ if not self.check_mode:
+ try:
+ if self.parameters.paused:
+ self.client.pause(container=container.Id)
+ else:
+ self.client.unpause(container=container.Id)
+ except Exception as exc:
+ self.fail("Error %s container %s: %s" % (
+ "pausing" if self.parameters.paused else "unpausing", container.Id, to_native(exc)
+ ))
+ container = self._get_container(container.Id)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(set_paused=self.parameters.paused))
+
+ self.facts = container.raw
+
+ def absent(self):
+ container = self._get_container(self.parameters.name)
+ if container.exists:
+ if container.running:
+ self.diff_tracker.add('running', parameter=False, active=True)
+ self.container_stop(container.Id)
+ self.diff_tracker.add('exists', parameter=False, active=True)
+ self.container_remove(container.Id)
+
+ def fail(self, msg, **kwargs):
+ self.client.fail(msg, **kwargs)
+
+ def _output_logs(self, msg):
+ self.client.module.log(msg=msg)
+
+ def _get_container(self, container):
+ '''
+ Expects container ID or Name. Returns a container object
+ '''
+ return Container(self.client.get_container(container), self.parameters)
+
+ def _get_image(self):
+ if not self.parameters.image:
+ self.log('No image specified')
+ return None
+ if is_image_name_id(self.parameters.image):
+ image = self.client.find_image_by_id(self.parameters.image)
+ else:
+ repository, tag = utils.parse_repository_tag(self.parameters.image)
+ if not tag:
+ tag = "latest"
+ image = self.client.find_image(repository, tag)
+ if not image or self.parameters.pull:
+ if not self.check_mode:
+ self.log("Pull the image.")
+ image, alreadyToLatest = self.client.pull_image(repository, tag)
+ if alreadyToLatest:
+ self.results['changed'] = False
+ else:
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+ elif not image:
+ # If the image isn't there, claim we'll pull.
+ # (Implicitly: if the image is there, claim it already was latest.)
+ self.results['changed'] = True
+ self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag)))
+
+ self.log("image")
+ self.log(image, pretty_print=True)
+ return image
+
+ def _image_is_different(self, image, container):
+ if image and image.get('Id'):
+ if container and container.Image:
+ if image.get('Id') != container.Image:
+ self.diff_tracker.add('image', parameter=image.get('Id'), active=container.Image)
+ return True
+ return False
+
+ def update_limits(self, container):
+ limits_differ, different_limits = container.has_different_resource_limits()
+ if limits_differ:
+ self.log("limit differences:")
+ self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True)
+ self.diff_tracker.merge(different_limits)
+ if limits_differ and not self.check_mode:
+ self.container_update(container.Id, self.parameters.update_parameters)
+ return self._get_container(container.Id)
+ return container
+
+ def update_networks(self, container, container_created):
+ updated_container = container
+ if self.parameters.comparisons['networks']['comparison'] != 'ignore' or container_created:
+ has_network_differences, network_differences = container.has_network_differences()
+ if has_network_differences:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(network_differences=network_differences))
+ else:
+ self.diff['differences'] = [dict(network_differences=network_differences)]
+ for netdiff in network_differences:
+ self.diff_tracker.add(
+ 'network.{0}'.format(netdiff['parameter']['name']),
+ parameter=netdiff['parameter'],
+ active=netdiff['container']
+ )
+ self.results['changed'] = True
+ updated_container = self._add_networks(container, network_differences)
+
+ if (self.parameters.comparisons['networks']['comparison'] == 'strict' and self.parameters.networks is not None) or self.parameters.purge_networks:
+ has_extra_networks, extra_networks = container.has_extra_networks()
+ if has_extra_networks:
+ if self.diff.get('differences'):
+ self.diff['differences'].append(dict(purge_networks=extra_networks))
+ else:
+ self.diff['differences'] = [dict(purge_networks=extra_networks)]
+ for extra_network in extra_networks:
+ self.diff_tracker.add(
+ 'network.{0}'.format(extra_network['name']),
+ active=extra_network
+ )
+ self.results['changed'] = True
+ updated_container = self._purge_networks(container, extra_networks)
+ return updated_container
+
+ def _add_networks(self, container, differences):
+ for diff in differences:
+ # remove the container from the network, if connected
+ if diff.get('container'):
+ self.results['actions'].append(dict(removed_from_network=diff['parameter']['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, diff['parameter']['id'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (diff['parameter']['name'],
+ to_native(exc)))
+ # connect to the network
+ params = dict()
+ for para in ('ipv4_address', 'ipv6_address', 'links', 'aliases'):
+ if diff['parameter'].get(para):
+ params[para] = diff['parameter'][para]
+ self.results['actions'].append(dict(added_to_network=diff['parameter']['name'], network_parameters=params))
+ if not self.check_mode:
+ try:
+ self.log("Connecting container to network %s" % diff['parameter']['id'])
+ self.log(params, pretty_print=True)
+ self.client.connect_container_to_network(container.Id, diff['parameter']['id'], **params)
+ except Exception as exc:
+ self.fail("Error connecting container to network %s - %s" % (diff['parameter']['name'], to_native(exc)))
+ return self._get_container(container.Id)
+
+ def _purge_networks(self, container, networks):
+ for network in networks:
+ self.results['actions'].append(dict(removed_from_network=network['name']))
+ if not self.check_mode:
+ try:
+ self.client.disconnect_container_from_network(container.Id, network['name'])
+ except Exception as exc:
+ self.fail("Error disconnecting container from network %s - %s" % (network['name'],
+ to_native(exc)))
+ return self._get_container(container.Id)
+
+ def container_create(self, image, create_parameters):
+ self.log("create container")
+ self.log("image: %s parameters:" % image)
+ self.log(create_parameters, pretty_print=True)
+ self.results['actions'].append(dict(created="Created container", create_parameters=create_parameters))
+ self.results['changed'] = True
+ new_container = None
+ if not self.check_mode:
+ try:
+ new_container = self.client.create_container(image, **create_parameters)
+ self.client.report_warnings(new_container)
+ except Exception as exc:
+ self.fail("Error creating container: %s" % to_native(exc))
+ return self._get_container(new_container['Id'])
+ return new_container
+
+ def container_start(self, container_id):
+ self.log("start container %s" % (container_id))
+ self.results['actions'].append(dict(started=container_id))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ self.client.start(container=container_id)
+ except Exception as exc:
+ self.fail("Error starting container %s: %s" % (container_id, to_native(exc)))
+
+ if self.parameters.detach is False:
+ if self.client.docker_py_version >= LooseVersion('3.0'):
+ status = self.client.wait(container_id)['StatusCode']
+ else:
+ status = self.client.wait(container_id)
+ self.client.fail_results['status'] = status
+ self.results['status'] = status
+
+ if self.parameters.auto_remove:
+ output = "Cannot retrieve result as auto_remove is enabled"
+ if self.parameters.output_logs:
+ self.client.module.warn('Cannot output_logs if auto_remove is enabled!')
+ else:
+ config = self.client.inspect_container(container_id)
+ logging_driver = config['HostConfig']['LogConfig']['Type']
+
+ if logging_driver in ('json-file', 'journald'):
+ output = self.client.logs(container_id, stdout=True, stderr=True, stream=False, timestamps=False)
+ if self.parameters.output_logs:
+ self._output_logs(msg=output)
+ else:
+ output = "Result logged using `%s` driver" % logging_driver
+
+ if status != 0:
+ self.fail(output)
+ if self.parameters.cleanup:
+ self.container_remove(container_id, force=True)
+ insp = self._get_container(container_id)
+ if insp.raw:
+ insp.raw['Output'] = output
+ else:
+ insp.raw = dict(Output=output)
+ return insp
+ return self._get_container(container_id)
+
+ def container_remove(self, container_id, link=False, force=False):
+ volume_state = (not self.parameters.keep_volumes)
+ self.log("remove container container:%s v:%s link:%s force%s" % (container_id, volume_state, link, force))
+ self.results['actions'].append(dict(removed=container_id, volume_state=volume_state, link=link, force=force))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ count = 0
+ while True:
+ try:
+ response = self.client.remove_container(container_id, v=volume_state, link=link, force=force)
+ except NotFound as dummy:
+ pass
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+ # New docker daemon versions do not allow containers to be removed
+ # if they are paused. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ self.fail("Error removing container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
+ count += 1
+ # Unpause
+ try:
+ self.client.unpause(container=container_id)
+ except Exception as exc2:
+ self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
+ # Now try again
+ continue
+ if 'removal of container ' in exc.explanation and ' is already in progress' in exc.explanation:
+ pass
+ else:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ except Exception as exc:
+ self.fail("Error removing container %s: %s" % (container_id, to_native(exc)))
+ # We only loop when explicitly requested by 'continue'
+ break
+ return response
+
+ def container_update(self, container_id, update_parameters):
+ if update_parameters:
+ self.log("update container %s" % (container_id))
+ self.log(update_parameters, pretty_print=True)
+ self.results['actions'].append(dict(updated=container_id, update_parameters=update_parameters))
+ self.results['changed'] = True
+ if not self.check_mode and callable(getattr(self.client, 'update_container')):
+ try:
+ result = self.client.update_container(container_id, **update_parameters)
+ self.client.report_warnings(result)
+ except Exception as exc:
+ self.fail("Error updating container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_kill(self, container_id):
+ self.results['actions'].append(dict(killed=container_id, signal=self.parameters.kill_signal))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ try:
+ if self.parameters.kill_signal:
+ response = self.client.kill(container_id, signal=self.parameters.kill_signal)
+ else:
+ response = self.client.kill(container_id)
+ except Exception as exc:
+ self.fail("Error killing container %s: %s" % (container_id, exc))
+ return response
+
+ def container_restart(self, container_id):
+ self.results['actions'].append(dict(restarted=container_id, timeout=self.parameters.stop_timeout))
+ self.results['changed'] = True
+ if not self.check_mode:
+ try:
+ if self.parameters.stop_timeout:
+ dummy = self.client.restart(container_id, timeout=self.parameters.stop_timeout)
+ else:
+ dummy = self.client.restart(container_id)
+ except Exception as exc:
+ self.fail("Error restarting container %s: %s" % (container_id, to_native(exc)))
+ return self._get_container(container_id)
+
+ def container_stop(self, container_id):
+ if self.parameters.force_kill:
+ self.container_kill(container_id)
+ return
+ self.results['actions'].append(dict(stopped=container_id, timeout=self.parameters.stop_timeout))
+ self.results['changed'] = True
+ response = None
+ if not self.check_mode:
+ count = 0
+ while True:
+ try:
+ if self.parameters.stop_timeout:
+ response = self.client.stop(container_id, timeout=self.parameters.stop_timeout)
+ else:
+ response = self.client.stop(container_id)
+ except APIError as exc:
+ if 'Unpause the container before stopping or killing' in exc.explanation:
+ # New docker daemon versions do not allow containers to be stopped
+ # if they are paused. Make sure we don't end up in an infinite loop.
+ if count == 3:
+ self.fail("Error stopping container %s (tried to unpause three times): %s" % (container_id, to_native(exc)))
+ count += 1
+ # Unpause
+ try:
+ self.client.unpause(container=container_id)
+ except Exception as exc2:
+ self.fail("Error unpausing container %s for removal: %s" % (container_id, to_native(exc2)))
+ # Now try again
+ continue
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+ except Exception as exc:
+ self.fail("Error stopping container %s: %s" % (container_id, to_native(exc)))
+ # We only loop when explicitly requested by 'continue'
+ break
+ return response
+
+
+def detect_ipvX_address_usage(client):
+ '''
+ Helper function to detect whether any specified network uses ipv4_address or ipv6_address
+ '''
+ for network in client.module.params.get("networks") or []:
+ if network.get('ipv4_address') is not None or network.get('ipv6_address') is not None:
+ return True
+ return False
+
+
+class AnsibleDockerClientContainer(AnsibleDockerClient):
+ # A list of module options which are not docker container properties
+ __NON_CONTAINER_PROPERTY_OPTIONS = tuple([
+ 'env_file', 'force_kill', 'keep_volumes', 'ignore_image', 'name', 'pull', 'purge_networks',
+ 'recreate', 'restart', 'state', 'networks', 'cleanup', 'kill_signal',
+ 'output_logs', 'paused', 'removal_wait_timeout', 'default_host_ip',
+ ] + list(DOCKER_COMMON_ARGS.keys()))
+
+ def _parse_comparisons(self):
+ comparisons = {}
+ comp_aliases = {}
+ # Put in defaults
+ explicit_types = dict(
+ command='list',
+ devices='set(dict)',
+ device_requests='set(dict)',
+ dns_search_domains='list',
+ dns_servers='list',
+ env='set',
+ entrypoint='list',
+ etc_hosts='set',
+ mounts='set(dict)',
+ networks='set(dict)',
+ ulimits='set(dict)',
+ device_read_bps='set(dict)',
+ device_write_bps='set(dict)',
+ device_read_iops='set(dict)',
+ device_write_iops='set(dict)',
+ )
+ all_options = set() # this is for improving user feedback when a wrong option was specified for comparison
+ default_values = dict(
+ stop_timeout='ignore',
+ )
+ for option, data in self.module.argument_spec.items():
+ all_options.add(option)
+ for alias in data.get('aliases', []):
+ all_options.add(alias)
+ # Ignore options which aren't used as container properties
+ if option in self.__NON_CONTAINER_PROPERTY_OPTIONS and option != 'networks':
+ continue
+ # Determine option type
+ if option in explicit_types:
+ datatype = explicit_types[option]
+ elif data['type'] == 'list':
+ datatype = 'set'
+ elif data['type'] == 'dict':
+ datatype = 'dict'
+ else:
+ datatype = 'value'
+ # Determine comparison type
+ if option in default_values:
+ comparison = default_values[option]
+ elif datatype in ('list', 'value'):
+ comparison = 'strict'
+ else:
+ comparison = 'allow_more_present'
+ comparisons[option] = dict(type=datatype, comparison=comparison, name=option)
+ # Keep track of aliases
+ comp_aliases[option] = option
+ for alias in data.get('aliases', []):
+ comp_aliases[alias] = option
+ # Process legacy ignore options
+ if self.module.params['ignore_image']:
+ comparisons['image']['comparison'] = 'ignore'
+ if self.module.params['purge_networks']:
+ comparisons['networks']['comparison'] = 'strict'
+ # Process options
+ if self.module.params.get('comparisons'):
+ # If '*' appears in comparisons, process it first
+ if '*' in self.module.params['comparisons']:
+ value = self.module.params['comparisons']['*']
+ if value not in ('strict', 'ignore'):
+ self.fail("The wildcard can only be used with comparison modes 'strict' and 'ignore'!")
+ for option, v in comparisons.items():
+ if option == 'networks':
+ # `networks` is special: only update if
+ # some value is actually specified
+ if self.module.params['networks'] is None:
+ continue
+ v['comparison'] = value
+ # Now process all other comparisons.
+ comp_aliases_used = {}
+ for key, value in self.module.params['comparisons'].items():
+ if key == '*':
+ continue
+ # Find main key
+ key_main = comp_aliases.get(key)
+ if key_main is None:
+ if key in all_options:
+ self.fail("The module option '%s' cannot be specified in the comparisons dict, "
+ "since it does not correspond to container's state!" % key)
+ self.fail("Unknown module option '%s' in comparisons dict!" % key)
+ if key_main in comp_aliases_used:
+ self.fail("Both '%s' and '%s' (aliases of %s) are specified in comparisons dict!" % (key, comp_aliases_used[key_main], key_main))
+ comp_aliases_used[key_main] = key
+ # Check value and update accordingly
+ if value in ('strict', 'ignore'):
+ comparisons[key_main]['comparison'] = value
+ elif value == 'allow_more_present':
+ if comparisons[key_main]['type'] == 'value':
+ self.fail("Option '%s' is a value and not a set/list/dict, so its comparison cannot be %s" % (key, value))
+ comparisons[key_main]['comparison'] = value
+ else:
+ self.fail("Unknown comparison mode '%s'!" % value)
+ # Add implicit options
+ comparisons['publish_all_ports'] = dict(type='value', comparison='strict', name='published_ports')
+ comparisons['expected_ports'] = dict(type='dict', comparison=comparisons['published_ports']['comparison'], name='expected_ports')
+ comparisons['disable_healthcheck'] = dict(type='value',
+ comparison='ignore' if comparisons['healthcheck']['comparison'] == 'ignore' else 'strict',
+ name='disable_healthcheck')
+ # Check legacy values
+ if self.module.params['ignore_image'] and comparisons['image']['comparison'] != 'ignore':
+ self.module.warn('The ignore_image option has been overridden by the comparisons option!')
+ if self.module.params['purge_networks'] and comparisons['networks']['comparison'] != 'strict':
+ self.module.warn('The purge_networks option has been overridden by the comparisons option!')
+ self.comparisons = comparisons
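+ # Illustrative example (sample values): comparisons={'*': 'ignore', 'env': 'strict',
+ # 'labels': 'allow_more_present'} ignores all options for change detection except
+ # env (which must match exactly) and labels (where extra labels on the container
+ # are allowed).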
+
+ def _get_additional_minimal_versions(self):
+ stop_timeout_supported = self.docker_api_version >= LooseVersion('1.25')
+ stop_timeout_needed_for_update = self.module.params.get("stop_timeout") is not None and self.module.params.get('state') != 'absent'
+ if stop_timeout_supported:
+ stop_timeout_supported = self.docker_py_version >= LooseVersion('2.1')
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker SDK for Python's version is %s. Minimum version required is 2.1 to update "
+ "the container's stop_timeout configuration. "
+ "If you use the 'docker-py' module, you have to switch to the 'docker' Python package." % (docker_version,))
+ else:
+ if stop_timeout_needed_for_update and not stop_timeout_supported:
+ # We warn (instead of fail) since in older versions, stop_timeout was not used
+ # to update the container's configuration, but only when stopping a container.
+ self.module.warn("Docker API version is %s. Minimum version required is 1.25 to set or "
+ "update the container's stop_timeout configuration." % (self.docker_api_version_str,))
+ self.option_minimal_versions['stop_timeout']['supported'] = stop_timeout_supported
+
+ def __init__(self, **kwargs):
+ option_minimal_versions = dict(
+ # internal options
+ log_config=dict(),
+ publish_all_ports=dict(),
+ ports=dict(),
+ volume_binds=dict(),
+ name=dict(),
+ # normal options
+ device_read_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_read_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_write_bps=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_write_iops=dict(docker_py_version='1.9.0', docker_api_version='1.22'),
+ device_requests=dict(docker_py_version='4.3.0', docker_api_version='1.40'),
+ dns_opts=dict(docker_api_version='1.21', docker_py_version='1.10.0'),
+ ipc_mode=dict(docker_api_version='1.25'),
+ mac_address=dict(docker_api_version='1.25'),
+ oom_score_adj=dict(docker_api_version='1.22'),
+ shm_size=dict(docker_api_version='1.22'),
+ stop_signal=dict(docker_api_version='1.21'),
+ tmpfs=dict(docker_api_version='1.22'),
+ volume_driver=dict(docker_api_version='1.21'),
+ memory_reservation=dict(docker_api_version='1.21'),
+ kernel_memory=dict(docker_api_version='1.21'),
+ auto_remove=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ healthcheck=dict(docker_py_version='2.0.0', docker_api_version='1.24'),
+ init=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
+ runtime=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ sysctls=dict(docker_py_version='1.10.0', docker_api_version='1.24'),
+ userns_mode=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ uts=dict(docker_py_version='3.5.0', docker_api_version='1.25'),
+ pids_limit=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ mounts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ cpus=dict(docker_py_version='2.3.0', docker_api_version='1.25'),
+ # specials
+ ipvX_address_supported=dict(docker_py_version='1.9.0', docker_api_version='1.22',
+ detect_usage=detect_ipvX_address_usage,
+ usage_msg='ipv4_address or ipv6_address in networks'),
+ stop_timeout=dict(), # see _get_additional_minimal_versions()
+ )
+
+ super(AnsibleDockerClientContainer, self).__init__(
+ option_minimal_versions=option_minimal_versions,
+ option_minimal_versions_ignore_params=self.__NON_CONTAINER_PROPERTY_OPTIONS,
+ **kwargs
+ )
+
+ self.image_inspect_source = 'Config'
+ if self.docker_api_version < LooseVersion('1.21'):
+ self.image_inspect_source = 'ContainerConfig'
+
+ self._get_additional_minimal_versions()
+ self._parse_comparisons()
+
+ if self.module.params['container_default_behavior'] is None:
+ self.module.params['container_default_behavior'] = 'compatibility'
+ self.module.deprecate(
+ 'The container_default_behavior option will change its default value from "compatibility" to '
+ '"no_defaults" in community.docker 2.0.0. To remove this warning, please specify an explicit value for it now',
+ version='2.0.0', collection_name='community.docker' # was Ansible 2.14 / community.general 3.0.0
+ )
+ if self.module.params['container_default_behavior'] == 'compatibility':
+ old_default_values = dict(
+ auto_remove=False,
+ detach=True,
+ init=False,
+ interactive=False,
+ memory="0",
+ paused=False,
+ privileged=False,
+ read_only=False,
+ tty=False,
+ )
+ for param, value in old_default_values.items():
+ if self.module.params[param] is None:
+ self.module.params[param] = value
+
+
+def main():
+ argument_spec = dict(
+ auto_remove=dict(type='bool'),
+ blkio_weight=dict(type='int'),
+ capabilities=dict(type='list', elements='str'),
+ cap_drop=dict(type='list', elements='str'),
+ cgroup_parent=dict(type='str'),
+ cleanup=dict(type='bool', default=False),
+ command=dict(type='raw'),
+ comparisons=dict(type='dict'),
+ container_default_behavior=dict(type='str', choices=['compatibility', 'no_defaults']),
+ cpu_period=dict(type='int'),
+ cpu_quota=dict(type='int'),
+ cpus=dict(type='float'),
+ cpuset_cpus=dict(type='str'),
+ cpuset_mems=dict(type='str'),
+ cpu_shares=dict(type='int'),
+ default_host_ip=dict(type='str'),
+ detach=dict(type='bool'),
+ devices=dict(type='list', elements='str'),
+ device_read_bps=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ )),
+ device_write_bps=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='str'),
+ )),
+ device_read_iops=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ )),
+ device_write_iops=dict(type='list', elements='dict', options=dict(
+ path=dict(required=True, type='str'),
+ rate=dict(required=True, type='int'),
+ )),
+ device_requests=dict(type='list', elements='dict', options=dict(
+ capabilities=dict(type='list', elements='list'),
+ count=dict(type='int'),
+ device_ids=dict(type='list', elements='str'),
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ )),
+ dns_servers=dict(type='list', elements='str'),
+ dns_opts=dict(type='list', elements='str'),
+ dns_search_domains=dict(type='list', elements='str'),
+ domainname=dict(type='str'),
+ entrypoint=dict(type='list', elements='str'),
+ env=dict(type='dict'),
+ env_file=dict(type='path'),
+ etc_hosts=dict(type='dict'),
+ exposed_ports=dict(type='list', elements='str', aliases=['exposed', 'expose']),
+ force_kill=dict(type='bool', default=False, aliases=['forcekill']),
+ groups=dict(type='list', elements='str'),
+ healthcheck=dict(type='dict', options=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ )),
+ hostname=dict(type='str'),
+ ignore_image=dict(type='bool', default=False),
+ image=dict(type='str'),
+ init=dict(type='bool'),
+ interactive=dict(type='bool'),
+ ipc_mode=dict(type='str'),
+ keep_volumes=dict(type='bool', default=True),
+ kernel_memory=dict(type='str'),
+ kill_signal=dict(type='str'),
+ labels=dict(type='dict'),
+ links=dict(type='list', elements='str'),
+ log_driver=dict(type='str'),
+ log_options=dict(type='dict', aliases=['log_opt']),
+ mac_address=dict(type='str'),
+ memory=dict(type='str'),
+ memory_reservation=dict(type='str'),
+ memory_swap=dict(type='str'),
+ memory_swappiness=dict(type='int'),
+ mounts=dict(type='list', elements='dict', options=dict(
+ target=dict(type='str', required=True),
+ source=dict(type='str'),
+ type=dict(type='str', choices=['bind', 'volume', 'tmpfs', 'npipe'], default='volume'),
+ read_only=dict(type='bool'),
+ consistency=dict(type='str', choices=['default', 'consistent', 'cached', 'delegated']),
+ propagation=dict(type='str', choices=['private', 'rprivate', 'shared', 'rshared', 'slave', 'rslave']),
+ no_copy=dict(type='bool'),
+ labels=dict(type='dict'),
+ volume_driver=dict(type='str'),
+ volume_options=dict(type='dict'),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='str'),
+ )),
+ name=dict(type='str', required=True),
+ network_mode=dict(type='str'),
+ networks=dict(type='list', elements='dict', options=dict(
+ name=dict(type='str', required=True),
+ ipv4_address=dict(type='str'),
+ ipv6_address=dict(type='str'),
+ aliases=dict(type='list', elements='str'),
+ links=dict(type='list', elements='str'),
+ )),
+ networks_cli_compatible=dict(type='bool', default=True),
+ oom_killer=dict(type='bool'),
+ oom_score_adj=dict(type='int'),
+ output_logs=dict(type='bool', default=False),
+ paused=dict(type='bool'),
+ pid_mode=dict(type='str'),
+ pids_limit=dict(type='int'),
+ privileged=dict(type='bool'),
+ published_ports=dict(type='list', elements='str', aliases=['ports']),
+ pull=dict(type='bool', default=False),
+ purge_networks=dict(type='bool', default=False),
+ read_only=dict(type='bool'),
+ recreate=dict(type='bool', default=False),
+ removal_wait_timeout=dict(type='float'),
+ restart=dict(type='bool', default=False),
+ restart_policy=dict(type='str', choices=['no', 'on-failure', 'always', 'unless-stopped']),
+ restart_retries=dict(type='int'),
+ runtime=dict(type='str'),
+ security_opts=dict(type='list', elements='str'),
+ shm_size=dict(type='str'),
+ state=dict(type='str', default='started', choices=['absent', 'present', 'started', 'stopped']),
+ stop_signal=dict(type='str'),
+ stop_timeout=dict(type='int'),
+ sysctls=dict(type='dict'),
+ tmpfs=dict(type='list', elements='str'),
+ tty=dict(type='bool'),
+ ulimits=dict(type='list', elements='str'),
+ user=dict(type='str'),
+ userns_mode=dict(type='str'),
+ uts=dict(type='str'),
+ volume_driver=dict(type='str'),
+ volumes=dict(type='list', elements='str'),
+ volumes_from=dict(type='list', elements='str'),
+ working_dir=dict(type='str'),
+ )
+
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClientContainer(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+ if client.module.params['networks_cli_compatible'] is True and client.module.params['networks'] and client.module.params['network_mode'] is None:
+ client.module.deprecate(
+ 'Please note that the default value for `network_mode` will change from not specified '
+ '(which is equal to `default`) to the name of the first network in `networks` if '
+ '`networks` has at least one entry and `networks_cli_compatible` is `true`. You can '
+ 'change the behavior now by explicitly setting `network_mode` to the name of the first '
+ 'network in `networks`, and remove this warning by setting `network_mode` to `default`. '
+ 'Please make sure that the value you set to `network_mode` equals the inspection result '
+ 'for existing containers, otherwise the module will recreate them. You can find out the '
+ 'correct value by running "docker inspect --format \'{{.HostConfig.NetworkMode}}\' <container_name>"',
+ version='2.0.0', collection_name='community.docker', # was Ansible 2.14 / community.general 3.0.0
+ )
+
+ try:
+ cm = ContainerManager(client)
+ client.module.exit_json(**sanitize_result(cm.results))
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container_info.py
new file mode 100644
index 00000000..92a73525
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_container_info.py
@@ -0,0 +1,145 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_container_info
+
+short_description: Retrieves facts about a docker container
+
+description:
+ - Retrieves facts about a docker container.
+ - Essentially returns the output of C(docker inspect <name>), similar to what M(community.docker.docker_container)
+ returns for a non-absent container.
+
+
+options:
+ name:
+ description:
+ - The name of the container to inspect.
+ - When identifying an existing container, the name may be a name or a long or short container ID.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+'''
+
+EXAMPLES = '''
+- name: Get info on container
+ community.docker.docker_container_info:
+ name: mydata
+ register: result
+
+- name: Does container exist?
+ ansible.builtin.debug:
+ msg: "The container {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about container
+ ansible.builtin.debug:
+ var: result.container
+ when: result.exists
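+
+# A sketch illustrating that I(name) also accepts a long or short container ID;
+# the ID below is a placeholder.
+- name: Get info on container by short ID
+ community.docker.docker_container_info:
+ name: 8e47bf643eb9
+ register: result_by_id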
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the container exists.
+ type: bool
+ returned: always
+ sample: true
+container:
+ description:
+ - Facts representing the current state of the container. Matches the docker inspection output.
+ - Will be C(none) if the container does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "AppArmorProfile": "",
+ "Args": [],
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/usr/bin/supervisord"
+ ],
+ "Domainname": "",
+ "Entrypoint": null,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "443/tcp": {},
+ "80/tcp": {}
+ },
+ "Hostname": "8e47bf643eb9",
+ "Image": "lnmp_nginx:v1",
+ "Labels": {},
+ "OnBuild": null,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/tmp/lnmp/nginx-sites/logs/": {}
+ },
+ ...
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+
+ try:
+ container = client.get_container(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=bool(container),
+ container=container,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_host_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_host_info.py
new file mode 100644
index 00000000..9007221a
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_host_info.py
@@ -0,0 +1,343 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_host_info
+
+short_description: Retrieves facts about the docker host and lists of objects of its services
+
+description:
+ - Retrieves facts about a docker host.
+ - Essentially returns the output of C(docker system info).
+ - The module also allows listing object names for containers, images, networks and volumes.
+ It also allows querying information on disk usage.
+ - The output differs depending on API version of the docker daemon.
+ - If the docker daemon cannot be contacted or does not meet the API version requirements,
+ the module will fail.
+
+
+options:
+ containers:
+ description:
+ - Whether to list containers.
+ type: bool
+ default: no
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to list.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to list images.
+ type: bool
+ default: no
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to list.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to list networks.
+ type: bool
+ default: no
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to list.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to list volumes.
+ type: bool
+ default: no
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to list.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ disk_usage:
+ description:
+ - Summary information on disk space used by all Docker layers.
+ - The output is a sum of images, volumes, containers and build cache.
+ type: bool
+ default: no
+ verbose_output:
+ description:
+ - When set to C(yes) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(yes),
+ the output will contain verbose information about the objects, matching the full output of the corresponding
+ API method. For details see the documentation of your version of the Docker API at U(https://docs.docker.com/engine/api/).
+ - The verbose output in this module contains only a subset of the information returned by the corresponding
+ I(_info) module for each object type.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on docker host
+ community.docker.docker_host_info:
+ register: result
+
+- name: Get info on docker host and list images
+ community.docker.docker_host_info:
+ images: yes
+ register: result
+
+- name: Get info on docker host and list images matching the filter
+ community.docker.docker_host_info:
+ images: yes
+ images_filters:
+ label: "mylabel"
+ register: result
+
+- name: Get info on docker host and verbose list images
+ community.docker.docker_host_info:
+ images: yes
+ verbose_output: yes
+ register: result
+
+- name: Get info on docker host and used disk space
+ community.docker.docker_host_info:
+ disk_usage: yes
+ register: result
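+
+# A sketch of filtering the container listing; C(status) is one of the filters
+# accepted by the containers list API (see I(containers_filters) above).
+- name: Get info on docker host and list running containers only
+ community.docker.docker_host_info:
+ containers: yes
+ containers_filters:
+ status: running
+ register: result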
+
+- ansible.builtin.debug:
+ var: result.host_info
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+
+host_info:
+ description:
+ - Facts representing the basic state of the docker host. Matches the C(docker system info) output.
+ returned: always
+ type: dict
+volumes:
+ description:
+ - List of dict objects containing the basic information about each volume.
+ Keys match the C(docker volume ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(volumes) is C(yes)
+ type: list
+ elements: dict
+networks:
+ description:
+ - List of dict objects containing the basic information about each network.
+ Keys match the C(docker network ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(networks) is C(yes)
+ type: list
+ elements: dict
+containers:
+ description:
+ - List of dict objects containing the basic information about each container.
+ Keys match the C(docker container ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(containers) is C(yes)
+ type: list
+ elements: dict
+images:
+ description:
+ - List of dict objects containing the basic information about each image.
+ Keys match the C(docker image ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(images) is C(yes)
+ type: list
+ elements: dict
+disk_usage:
+ description:
+ - Summary information on disk usage by images, containers and volumes on the docker host,
+ unless I(verbose_output=yes). See description for I(verbose_output).
+ returned: When I(disk_usage) is C(yes)
+ type: dict
+
+'''
+
+import traceback
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ RequestException,
+)
+from ansible.module_utils._text import to_native
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # Missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import clean_dict_booleans_for_docker_api
+
+
+class DockerHostManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerHostManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['volumes', 'networks', 'containers', 'images']
+
+ self.results['host_info'] = self.get_docker_host_info()
+
+ if self.client.module.params['disk_usage']:
+ self.results['disk_usage'] = self.get_docker_disk_usage_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+
+ def get_docker_host_info(self):
+ try:
+ return self.client.info()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_disk_usage_facts(self):
+ try:
+ if self.verbose_output:
+ return self.client.df()
+ else:
+ return dict(LayersSize=self.client.df()['LayersSize'])
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host: %s" % to_native(exc))
+
+ def get_docker_items_list(self, docker_object=None, filters=None, verbose=False):
+ items = None
+ items_list = []
+
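+ # Keys to keep in non-verbose mode; these roughly match the columns of the
+ # corresponding 'docker ... ls' CLI output.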
+ header_containers = ['Id', 'Image', 'Command', 'Created', 'Status', 'Ports', 'Names']
+ header_volumes = ['Driver', 'Name']
+ header_images = ['Id', 'RepoTags', 'Created', 'Size']
+ header_networks = ['Id', 'Driver', 'Name', 'Scope']
+
+ filter_arg = dict()
+ if filters:
+ filter_arg['filters'] = filters
+ try:
+ if docker_object == 'containers':
+ items = self.client.containers(**filter_arg)
+ elif docker_object == 'networks':
+ items = self.client.networks(**filter_arg)
+ elif docker_object == 'images':
+ items = self.client.images(**filter_arg)
+ elif docker_object == 'volumes':
+ items = self.client.volumes(**filter_arg)
+ except APIError as exc:
+ self.client.fail("Error inspecting docker host for object '%s': %s" %
+ (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ if docker_object != 'volumes':
+ return items
+ else:
+ return items['Volumes']
+
+ if docker_object == 'volumes':
+ items = items['Volumes']
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'containers':
+ for key in header_containers:
+ item_record[key] = item.get(key)
+ elif docker_object == 'networks':
+ for key in header_networks:
+ item_record[key] = item.get(key)
+ elif docker_object == 'images':
+ for key in header_images:
+ item_record[key] = item.get(key)
+ elif docker_object == 'volumes':
+ for key in header_volumes:
+ item_record[key] = item.get(key)
+ items_list.append(item_record)
+
+ return items_list
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ disk_usage=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+
+ option_minimal_versions = dict(
+ networks_filters=dict(docker_py_version='2.0.2'),
+ disk_usage=dict(docker_py_version='2.2.0'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.21',
+ option_minimal_versions=option_minimal_versions,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ ),
+ )
+ client.fail_results['can_talk_to_docker'] = True
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerHostManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image.py
new file mode 100644
index 00000000..07728813
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image.py
@@ -0,0 +1,854 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image
+
+short_description: Manage docker images
+
+
+description:
+ - Build, load or pull an image, making the image available for creating containers. Also supports tagging an
+ image into a repository and archiving an image to a C(.tar) file.
+
+options:
+ source:
+ description:
+ - "Determines where the module will try to retrieve the image from."
+ - "Use C(build) to build the image from a C(Dockerfile). I(build.path) must
+ be specified when this value is used."
+ - "Use C(load) to load the image from a C(.tar) file. I(load_path) must
+ be specified when this value is used."
+ - "Use C(pull) to pull the image from a registry."
+ - "Use C(local) to make sure that the image is already available on the local
+ docker daemon, i.e. do not try to build, pull or load the image."
+ type: str
+ choices:
+ - build
+ - load
+ - pull
+ - local
+ build:
+ description:
+ - "Specifies options used for building images."
+ type: dict
+ suboptions:
+ cache_from:
+ description:
+ - List of image names to consider as cache source.
+ type: list
+ elements: str
+ dockerfile:
+ description:
+ - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image.
+ - This can also include a relative path (relative to I(path)).
+ type: str
+ http_timeout:
+ description:
+ - Timeout for HTTP requests during the image build operation. Provide a positive integer value for the number of
+ seconds.
+ type: int
+ path:
+ description:
+ - Use with state C(present) to build an image. Will be the path to a directory containing the context and
+ Dockerfile for building an image.
+ type: path
+ required: yes
+ pull:
+ description:
+ - When building an image, download any updates to the C(FROM) image in the Dockerfile.
+ type: bool
+ default: no
+ rm:
+ description:
+ - Remove intermediate containers after build.
+ type: bool
+ default: yes
+ network:
+ description:
+ - The network to use for C(RUN) build instructions.
+ type: str
+ nocache:
+ description:
+ - Do not use cache when building an image.
+ type: bool
+ default: no
+ etc_hosts:
+ description:
+ - Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
+ type: dict
+ args:
+ description:
+ - Provide a dictionary of C(key:value) build arguments that map to the Dockerfile C(ARG) directive.
+ - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+ - Requires Docker API >= 1.21.
+ type: dict
+ container_limits:
+ description:
+ - A dictionary of limits applied to each container created by the build process.
+ type: dict
+ suboptions:
+ memory:
+ description:
+ - Set memory limit for build.
+ type: int
+ memswap:
+ description:
+ - Total memory (memory + swap), -1 to disable swap.
+ type: int
+ cpushares:
+ description:
+ - CPU shares (relative weight).
+ type: int
+ cpusetcpus:
+ description:
+ - CPUs in which to allow execution, e.g., "0-3", "0,1".
+ type: str
+ use_config_proxy:
+ description:
+ - If set to C(yes) and a proxy configuration is specified in the docker client configuration
+ (by default C($HOME/.docker/config.json)), the corresponding environment variables will
+ be set in the container being built.
+ - Needs Docker SDK for Python >= 3.7.0.
+ type: bool
+ target:
+ description:
+ - When building an image, specifies an intermediate build stage by
+ name as the final stage for the resulting image.
+ type: str
+ platform:
+ description:
+ - Platform in the format C(os[/arch[/variant]]).
+ type: str
+ version_added: 1.1.0
+ archive_path:
+ description:
+ - Use with state C(present) to archive an image to a C(.tar) file.
+ type: path
+ load_path:
+ description:
+ - Use with state C(present) to load an image from a C(.tar) file.
+ - Set I(source) to C(load) if you want to load the image.
+ type: path
+ force_source:
+ description:
+ - Use with state C(present) to build, load or pull an image (depending on the
+ value of the I(source) option) when the image already exists.
+ type: bool
+ default: false
+ force_absent:
+ description:
+ - Use with state I(absent) to un-tag and remove all images matching the specified name.
+ type: bool
+ default: false
+ force_tag:
+ description:
+ - Use with state C(present) to force tagging an image.
+ type: bool
+ default: false
+ name:
+ description:
+ - "Image name. Name format will be one of: name, repository/name, registry_server:port/name.
+ When pushing or pulling an image the name can optionally include the tag by appending ':tag_name'."
+ - Note that image IDs (hashes) are not supported.
+ type: str
+ required: yes
+ push:
+ description:
+ - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter.
+ type: bool
+ default: no
+ repository:
+ description:
+ - Full path to a repository. Use with state C(present) to tag the image into the repository. Expects
+ format C(repository:tag). If no tag is provided, will use the value of the I(tag) parameter, or C(latest).
+ type: str
+ state:
+ description:
+ - Make assertions about the state of an image.
+ - When C(absent) an image will be removed. Use the force option to un-tag and remove all images
+ matching the provided name.
+ - When C(present) check if an image exists using the provided name and tag. If the image is not found or the
+ force option is used, the image will either be pulled, built or loaded, depending on the I(source) option.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+ tag:
+ description:
+ - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to
+ C(latest).
+ - If the I(name) parameter has the format C(name:tag), then the tag value from I(name) will take precedence.
+ type: str
+ default: latest
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Pavel Antonov (@softzilla)
+ - Chris Houseknecht (@chouseknecht)
+ - Sorin Sbarnea (@ssbarnea)
+
+'''
+
+EXAMPLES = '''
+
+- name: Pull an image
+ community.docker.docker_image:
+ name: pacur/centos-7
+ source: pull
+
+- name: Tag and push to docker hub
+ community.docker.docker_image:
+ name: pacur/centos-7:56
+ repository: dcoppenhagan/myimage:7.56
+ push: yes
+ source: local
+
+- name: Tag and push to local registry
+ community.docker.docker_image:
+ # Image will be centos:7
+ name: centos
+ # Will be pushed to localhost:5000/centos:7
+ repository: localhost:5000/centos
+ tag: 7
+ push: yes
+ source: local
+
+- name: Add tag latest to image
+ community.docker.docker_image:
+ name: myimage:7.1.2
+ repository: myimage:latest
+ # As 'latest' usually already is present, we need to enable overwriting of existing tags:
+ force_tag: yes
+ source: local
+
+- name: Remove image
+ community.docker.docker_image:
+ state: absent
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+
+- name: Build an image and push it to a private repo
+ community.docker.docker_image:
+ build:
+ path: ./sinatra
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ push: yes
+ source: build
+
+- name: Archive image
+ community.docker.docker_image:
+ name: registry.ansible.com/chouseknecht/sinatra
+ tag: v1
+ archive_path: my_sinatra.tar
+ source: local
+
+- name: Load image from archive and push to a private registry
+ community.docker.docker_image:
+ name: localhost:5000/myimages/sinatra
+ tag: v1
+ push: yes
+ load_path: my_sinatra.tar
+ source: load
+
+- name: Build image with build args
+ community.docker.docker_image:
+ name: myimage
+ build:
+ path: /path/to/build/dir
+ args:
+ log_volume: /var/log/myapp
+ listen_port: 8080
+ source: build
+
+- name: Build image using cache source
+ community.docker.docker_image:
+ name: myimage:latest
+ build:
+ path: /path/to/build/dir
+ # Use as cache source for building myimage
+ cache_from:
+ - nginx:latest
+ - alpine:3.8
+ source: build
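+
+# A sketch of stopping the build at a named intermediate stage; assumes the
+# Dockerfile defines a stage called 'builder' (see the build.target option).
+- name: Build only the 'builder' stage of a multi-stage Dockerfile
+ community.docker.docker_image:
+ name: myimage:builder
+ build:
+ path: /path/to/build/dir
+ target: builder
+ source: build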
+'''
+
+RETURN = '''
+image:
+ description: Image inspection results for the affected image.
+ returned: success
+ type: dict
+ sample: {}
+stdout:
+ description: Docker build output when building an image.
+ returned: success
+ type: str
+ sample: ""
+ version_added: 1.0.0
+'''
+
+import errno
+import os
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ clean_dict_booleans_for_docker_api,
+ docker_version,
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ is_valid_tag,
+ RequestException,
+)
+from ansible.module_utils._text import to_native
+
+if docker_version is not None:
+ try:
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ from docker.auth import resolve_repository_name
+ else:
+ from docker.auth.auth import resolve_repository_name
+ from docker.utils.utils import parse_repository_tag
+ from docker.errors import DockerException
+ except ImportError:
+ # missing Docker SDK for Python handled in module_utils.docker.common
+ pass
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.source = parameters['source']
+ build = parameters['build'] or dict()
+ self.archive_path = parameters.get('archive_path')
+ self.cache_from = build.get('cache_from')
+ self.container_limits = build.get('container_limits')
+ self.dockerfile = build.get('dockerfile')
+ self.force_source = parameters.get('force_source')
+ self.force_absent = parameters.get('force_absent')
+ self.force_tag = parameters.get('force_tag')
+ self.load_path = parameters.get('load_path')
+ self.name = parameters.get('name')
+ self.network = build.get('network')
+ self.extra_hosts = clean_dict_booleans_for_docker_api(build.get('etc_hosts'))
+ self.nocache = build.get('nocache', False)
+ self.build_path = build.get('path')
+ self.pull = build.get('pull')
+ self.target = build.get('target')
+ self.repository = parameters.get('repository')
+ self.rm = build.get('rm', True)
+ self.state = parameters.get('state')
+ self.tag = parameters.get('tag')
+ self.http_timeout = build.get('http_timeout')
+ self.push = parameters.get('push')
+ self.buildargs = build.get('args')
+ self.build_platform = build.get('platform')
+ self.use_config_proxy = build.get('use_config_proxy')
+
+ # If name contains a tag, it takes precedence over tag parameter.
+ if not is_image_name_id(self.name):
+ repo, repo_tag = parse_repository_tag(self.name)
+ if repo_tag:
+ self.name = repo
+ self.tag = repo_tag
+
+ if self.state == 'present':
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def present(self):
+ '''
+ Handles state = 'present', which includes building, loading or pulling an image,
+ depending on user provided parameters.
+
+ :returns None
+ '''
+ image = self.client.find_image(name=self.name, tag=self.tag)
+
+ if not image or self.force_source:
+ if self.source == 'build':
+ # Build the image
+ if not os.path.isdir(self.build_path):
+ self.fail("Requested build path %s could not be found or you do not have access." % self.build_path)
+ image_name = self.name
+ if self.tag:
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.log("Building image %s" % image_name)
+ self.results['actions'].append("Built image %s from %s" % (image_name, self.build_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results.update(self.build_image())
+
+ elif self.source == 'load':
+ # Load the image from an archive
+ if not os.path.isfile(self.load_path):
+ self.fail("Error loading image %s. Specified path %s does not exist." % (self.name,
+ self.load_path))
+ image_name = self.name
+ if self.tag:
+ image_name = "%s:%s" % (self.name, self.tag)
+ self.results['actions'].append("Loaded image %s from %s" % (image_name, self.load_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'] = self.load_image()
+ elif self.source == 'pull':
+ # pull the image
+ self.results['actions'].append('Pulled image %s:%s' % (self.name, self.tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.results['image'], dummy = self.client.pull_image(self.name, tag=self.tag)
+ elif self.source == 'local':
+ if image is None:
+ name = self.name
+ if self.tag:
+ name = "%s:%s" % (self.name, self.tag)
+ self.client.fail('Cannot find the image %s locally.' % name)
+ if not self.check_mode and image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+
+ if self.archive_path:
+ self.archive_image(self.name, self.tag)
+
+ if self.push and not self.repository:
+ self.push_image(self.name, self.tag)
+ elif self.repository:
+ self.tag_image(self.name, self.tag, self.repository, push=self.push)
+
+ def absent(self):
+ '''
+ Handles state = 'absent', which removes an image.
+
+ :return None
+ '''
+ name = self.name
+ if is_image_name_id(name):
+ image = self.client.find_image_by_id(name)
+ else:
+ image = self.client.find_image(name, self.tag)
+ if self.tag:
+ name = "%s:%s" % (self.name, self.tag)
+ if image:
+ if not self.check_mode:
+ try:
+ self.client.remove_image(name, force=self.force_absent)
+ except Exception as exc:
+ self.fail("Error removing image %s - %s" % (name, str(exc)))
+
+ self.results['changed'] = True
+ self.results['actions'].append("Removed image %s" % (name))
+ self.results['image']['state'] = 'Deleted'
+
+ def archive_image(self, name, tag):
+ '''
+ Archive an image to a .tar file. Called when archive_path is passed.
+
+ :param name - name of the image. Type: str
+ :param tag - tag of the image. Type: str
+ :return None
+ '''
+
+ if not tag:
+ tag = "latest"
+
+ image = self.client.find_image(name=name, tag=tag)
+ if not image:
+ self.log("archive image: image %s:%s not found" % (name, tag))
+ return
+
+ image_name = "%s:%s" % (name, tag)
+ self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
+ self.results['changed'] = True
+ if not self.check_mode:
+ self.log("Getting archive of image %s" % image_name)
+ try:
+ image = self.client.get_image(image_name)
+ except Exception as exc:
+ self.fail("Error getting image %s - %s" % (image_name, str(exc)))
+
+ try:
+ with open(self.archive_path, 'wb') as fd:
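+ # Docker SDK for Python >= 3.0.0 returns an iterable of chunks directly;
+ # older versions return a response object exposing a stream() method.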
+ if self.client.docker_py_version >= LooseVersion('3.0.0'):
+ for chunk in image:
+ fd.write(chunk)
+ else:
+ for chunk in image.stream(2048, decode_content=False):
+ fd.write(chunk)
+ except Exception as exc:
+ self.fail("Error writing image archive %s - %s" % (self.archive_path, str(exc)))
+
+ image = self.client.find_image(name=name, tag=tag)
+ if image:
+ self.results['image'] = image
+
+ def push_image(self, name, tag=None):
+ '''
+ If the name of the image contains a repository path, then push the image.
+
+ :param name Name of the image to push.
+ :param tag Use a specific tag.
+ :return: None
+ '''
+
+ repository = name
+ if not tag:
+ repository, tag = parse_repository_tag(name)
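+ # Split the registry host (if any) from the repository path, e.g.
+ # 'registry.example.com/org/app' -> ('registry.example.com', 'org/app').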
+ registry, repo_name = resolve_repository_name(repository)
+
+ self.log("push %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+
+ if registry:
+ self.results['actions'].append("Pushed image %s to %s/%s:%s" % (self.name, registry, repo_name, tag))
+ self.results['changed'] = True
+ if not self.check_mode:
+ status = None
+ try:
+ changed = False
+ for line in self.client.push(repository, tag=tag, stream=True, decode=True):
+ self.log(line, pretty_print=True)
+ if line.get('errorDetail'):
+ raise Exception(line['errorDetail']['message'])
+ status = line.get('status')
+ if status == 'Pushing':
+ changed = True
+ self.results['changed'] = changed
+ except Exception as exc:
+ if re.search('unauthorized', str(exc)):
+ if re.search('authentication required', str(exc)):
+ self.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." %
+ (registry, repo_name, tag, str(exc), registry))
+ else:
+ self.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" %
+ (registry, repo_name, tag, str(exc)))
+ self.fail("Error pushing image %s: %s" % (repository, str(exc)))
+ self.results['image'] = self.client.find_image(name=repository, tag=tag)
+ if not self.results['image']:
+ self.results['image'] = dict()
+ self.results['image']['push_status'] = status
+
+ def tag_image(self, name, tag, repository, push=False):
+ '''
+ Tag an image into a repository.
+
+ :param name: name of the image. required.
+ :param tag: image tag.
+ :param repository: path to the repository. required.
+ :param push: bool. push the image once it's tagged.
+ :return: None
+ '''
+ repo, repo_tag = parse_repository_tag(repository)
+ if not repo_tag:
+ repo_tag = "latest"
+ if tag:
+ repo_tag = tag
+ image = self.client.find_image(name=repo, tag=repo_tag)
+ found = 'found' if image else 'not found'
+ self.log("image %s was %s" % (repo, found))
+
+ if not image or self.force_tag:
+ self.log("tagging %s:%s to %s:%s" % (name, tag, repo, repo_tag))
+ self.results['changed'] = True
+ self.results['actions'].append("Tagged image %s:%s to %s:%s" % (name, tag, repo, repo_tag))
+ if not self.check_mode:
+ try:
+ # Finding the image does not always work, especially when running a localhost registry. In those
+ # cases, if we don't set force=True, it errors.
+ image_name = name
+ if tag and not re.search(tag, name):
+ image_name = "%s:%s" % (name, tag)
+ tag_status = self.client.tag(image_name, repo, tag=repo_tag, force=True)
+ if not tag_status:
+ raise Exception("Tag operation failed.")
+ except Exception as exc:
+ self.fail("Error: failed to tag image - %s" % str(exc))
+ self.results['image'] = self.client.find_image(name=repo, tag=repo_tag)
+ if image and image['Id'] == self.results['image']['Id']:
+ self.results['changed'] = False
+
+ if push:
+ self.push_image(repo, repo_tag)
+
+ @staticmethod
+ def _extract_output_line(line, output):
+ '''
+ Extract a text line from the stream output and, if found, add it to output.
+ '''
+ if 'stream' in line or 'status' in line:
+ # Make sure we have a string (assuming that line['stream'] and
+ # line['status'] are either not defined, falsish, or a string)
+ text_line = line.get('stream') or line.get('status') or ''
+ output.append(text_line)
+
+ def build_image(self):
+ '''
+ Build an image
+
+ :return: image dict
+ '''
+ params = dict(
+ path=self.build_path,
+ tag=self.name,
+ rm=self.rm,
+ nocache=self.nocache,
+ timeout=self.http_timeout,
+ pull=self.pull,
+ forcerm=self.rm,
+ dockerfile=self.dockerfile,
+ decode=True,
+ )
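+ # docker-py < 3.0.0 needs stream=True to return a generator; the parameter
+ # was removed in 3.0.0, where build output is always streamed.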
+ if self.client.docker_py_version < LooseVersion('3.0.0'):
+ params['stream'] = True
+
+ if self.tag:
+ params['tag'] = "%s:%s" % (self.name, self.tag)
+ if self.container_limits:
+ params['container_limits'] = self.container_limits
+ if self.buildargs:
+ for key, value in self.buildargs.items():
+ self.buildargs[key] = to_native(value)
+ params['buildargs'] = self.buildargs
+ if self.cache_from:
+ params['cache_from'] = self.cache_from
+ if self.network:
+ params['network_mode'] = self.network
+ if self.extra_hosts:
+ params['extra_hosts'] = self.extra_hosts
+ if self.use_config_proxy:
+ params['use_config_proxy'] = self.use_config_proxy
+ # Due to a bug in docker-py, it will crash if
+ # use_config_proxy is True and buildargs is None
+ if 'buildargs' not in params:
+ params['buildargs'] = {}
+ if self.target:
+ params['target'] = self.target
+ if self.build_platform is not None:
+ params['platform'] = self.build_platform
+
+ build_output = []
+ for line in self.client.build(**params):
+ # line = json.loads(line)
+ self.log(line, pretty_print=True)
+ self._extract_output_line(line, build_output)
+
+ if line.get('error'):
+ if line.get('errorDetail'):
+ errorDetail = line.get('errorDetail')
+ self.fail(
+ "Error building %s - code: %s, message: %s, logs: %s" % (
+ self.name,
+ errorDetail.get('code'),
+ errorDetail.get('message'),
+ build_output))
+ else:
+ self.fail("Error building %s - message: %s, logs: %s" % (
+ self.name, line.get('error'), build_output))
+
+ return {"stdout": "\n".join(build_output),
+ "image": self.client.find_image(name=self.name, tag=self.tag)}
+
+ def load_image(self):
+ '''
+ Load an image from a .tar archive
+
+ :return: image dict
+ '''
+ # Load image(s) from file
+ load_output = []
+ has_output = False
+ try:
+ self.log("Opening image %s" % self.load_path)
+ with open(self.load_path, 'rb') as image_tar:
+ self.log("Loading image from %s" % self.load_path)
+ output = self.client.load_image(image_tar)
+ if output is not None:
+ # Old versions of the Docker SDK for Python (before version 2.5.0) do not return anything.
+ # (See https://github.com/docker/docker-py/commit/7139e2d8f1ea82340417add02090bfaf7794f159)
+ # Note that before that commit, something else than None was returned, but that was also
+ # only introduced in a commit that first appeared in 2.5.0 (see
+ # https://github.com/docker/docker-py/commit/9e793806ff79559c3bc591d8c52a3bbe3cdb7350).
+ # So the above check works for every released version of Docker SDK for Python.
+ has_output = True
+ for line in output:
+ self.log(line, pretty_print=True)
+ self._extract_output_line(line, load_output)
+ else:
+ if LooseVersion(docker_version) < LooseVersion('2.5.0'):
+ self.client.module.warn(
+ 'The installed version of the Docker SDK for Python does not return the loading results'
+ ' from the Docker daemon. Therefore, we cannot verify whether the expected image was'
+ ' loaded, whether multiple images were loaded, or whether the load actually succeeded.'
+ ' If you are not stuck with Python 2.6, *please* upgrade to a version newer than 2.5.0'
+ ' (2.5.0 was released in August 2017).'
+ )
+ else:
+ self.client.module.warn(
+ 'The API version of your Docker daemon is < 1.23, which does not return the image'
+ ' loading result from the Docker daemon. Therefore, we cannot verify whether the'
+ ' expected image was loaded, whether multiple images were loaded, or whether the load'
+ ' actually succeeded. You should consider upgrading your Docker daemon.'
+ )
+ except EnvironmentError as exc:
+ if exc.errno == errno.ENOENT:
+ self.client.fail("Error opening image %s - %s" % (self.load_path, str(exc)))
+ self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+ except Exception as exc:
+ self.client.fail("Error loading image %s - %s" % (self.name, str(exc)), stdout='\n'.join(load_output))
+
+ # Collect loaded images
+ if has_output:
+ # We can only do this when we actually got some output from Docker daemon
+ loaded_images = set()
+ for line in load_output:
+ if line.startswith('Loaded image:'):
+ loaded_images.add(line[len('Loaded image:'):].strip())
+
+ if not loaded_images:
+ self.client.fail("Detected no loaded images. Archive potentially corrupt?", stdout='\n'.join(load_output))
+
+ expected_image = '%s:%s' % (self.name, self.tag)
+ if expected_image not in loaded_images:
+ self.client.fail(
+ "The archive did not contain image '%s'. Instead, found %s." % (
+ expected_image, ', '.join(["'%s'" % image for image in sorted(loaded_images)])),
+ stdout='\n'.join(load_output))
+ loaded_images.remove(expected_image)
+
+ if loaded_images:
+ self.client.module.warn(
+ "The archive contained more images than specified: %s" % (
+ ', '.join(["'%s'" % image for image in sorted(loaded_images)]), ))
+
+ return self.client.find_image(self.name, self.tag)
+
+
+def main():
+ argument_spec = dict(
+ source=dict(type='str', choices=['build', 'load', 'pull', 'local']),
+ build=dict(type='dict', options=dict(
+ cache_from=dict(type='list', elements='str'),
+ container_limits=dict(type='dict', options=dict(
+ memory=dict(type='int'),
+ memswap=dict(type='int'),
+ cpushares=dict(type='int'),
+ cpusetcpus=dict(type='str'),
+ )),
+ dockerfile=dict(type='str'),
+ http_timeout=dict(type='int'),
+ network=dict(type='str'),
+ nocache=dict(type='bool', default=False),
+ path=dict(type='path', required=True),
+ pull=dict(type='bool', default=False),
+ rm=dict(type='bool', default=True),
+ args=dict(type='dict'),
+ use_config_proxy=dict(type='bool'),
+ target=dict(type='str'),
+ etc_hosts=dict(type='dict'),
+ platform=dict(type='str'),
+ )),
+ archive_path=dict(type='path'),
+ force_source=dict(type='bool', default=False),
+ force_absent=dict(type='bool', default=False),
+ force_tag=dict(type='bool', default=False),
+ load_path=dict(type='path'),
+ name=dict(type='str', required=True),
+ push=dict(type='bool', default=False),
+ repository=dict(type='str'),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ tag=dict(type='str', default='latest'),
+ )
+
+ required_if = [
+ ('state', 'present', ['source']),
+ ('source', 'build', ['build']),
+ ('source', 'load', ['load_path']),
+ ]
+
+ def detect_build_cache_from(client):
+ return client.module.params['build'] and client.module.params['build'].get('cache_from') is not None
+
+ def detect_build_network(client):
+ return client.module.params['build'] and client.module.params['build'].get('network') is not None
+
+ def detect_build_target(client):
+ return client.module.params['build'] and client.module.params['build'].get('target') is not None
+
+ def detect_use_config_proxy(client):
+ return client.module.params['build'] and client.module.params['build'].get('use_config_proxy') is not None
+
+ def detect_etc_hosts(client):
+ return client.module.params['build'] and bool(client.module.params['build'].get('etc_hosts'))
+
+ def detect_platform(client):
+ return client.module.params['build'] and client.module.params['build'].get('platform') is not None
+
+ option_minimal_versions = dict()
+ option_minimal_versions["build.cache_from"] = dict(docker_py_version='2.1.0', docker_api_version='1.25', detect_usage=detect_build_cache_from)
+ option_minimal_versions["build.network"] = dict(docker_py_version='2.4.0', docker_api_version='1.25', detect_usage=detect_build_network)
+ option_minimal_versions["build.target"] = dict(docker_py_version='2.4.0', detect_usage=detect_build_target)
+ option_minimal_versions["build.use_config_proxy"] = dict(docker_py_version='3.7.0', detect_usage=detect_use_config_proxy)
+ option_minimal_versions["build.etc_hosts"] = dict(docker_py_version='2.6.0', docker_api_version='1.27', detect_usage=detect_etc_hosts)
+ option_minimal_versions["build.platform"] = dict(docker_py_version='3.0.0', docker_api_version='1.32', detect_usage=detect_platform)
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_version='1.8.0',
+ min_docker_api_version='1.20',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ if not is_valid_tag(client.module.params['tag'], allow_empty=True):
+ client.fail('"{0}" is not a valid docker tag!'.format(client.module.params['tag']))
+
+ if client.module.params['source'] == 'build':
+ if not client.module.params['build'] or not client.module.params['build'].get('path'):
+ client.fail('If "source" is set to "build", the "build.path" option must be specified.')
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ image={}
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image_info.py
new file mode 100644
index 00000000..5d855fa2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_image_info.py
@@ -0,0 +1,267 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_image_info
+
+short_description: Inspect docker images
+
+
+description:
+ - Provide one or more image names, and the module will inspect each, returning an array of inspection results.
+ - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists
+ locally, you can call the module with the image name, then check whether the result list is empty (image does not
+ exist) or has one element (the image exists locally).
+ - The module will not attempt to pull images from registries. Use M(community.docker.docker_image) with I(source) set to C(pull)
+ to ensure an image is pulled.
+
+notes:
+ - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change.
+
+options:
+ name:
+ description:
+ - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]),
+ where C(tag) is optional. If a tag is not provided, C(latest) will be used. Image IDs can be
+ used instead of image names.
+ - If no name is provided, a list of all images will be returned.
+ type: list
+ elements: str
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.20"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+
+'''
+
+EXAMPLES = '''
+- name: Inspect a single image
+ community.docker.docker_image_info:
+ name: pacur/centos-7
+
+- name: Inspect multiple images
+ community.docker.docker_image_info:
+ name:
+ - pacur/centos-7
+ - sinatra
+ register: result
+
+- name: Make sure that both images pacur/centos-7 and sinatra exist locally
+ ansible.builtin.assert:
+ that:
+ - result.images | length == 2
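+
+# A sketch: omitting I(name) returns inspection results for all images
+# available locally.
+- name: Inspect all locally available images
+ community.docker.docker_image_info:
+ register: all_images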
+'''
+
+RETURN = '''
+images:
+ description:
+ - Inspection results for the selected images.
+ - The list only contains inspection results of images existing locally.
+ returned: always
+ type: list
+ elements: dict
+ sample: [
+ {
+ "Architecture": "amd64",
+ "Author": "",
+ "Comment": "",
+ "Config": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/etc/docker/registry/config.yml"
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Container": "e83a452b8fb89d78a25a6739457050131ca5c863629a47639530d9ad2008d610",
+ "ContainerConfig": {
+ "AttachStderr": false,
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "Cmd": [
+ "/bin/sh",
+ "-c",
+ '#(nop) CMD ["/etc/docker/registry/config.yml"]'
+ ],
+ "Domainname": "",
+ "Entrypoint": [
+ "/bin/registry"
+ ],
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "ExposedPorts": {
+ "5000/tcp": {}
+ },
+ "Hostname": "e5c68db50333",
+ "Image": "c72dce2618dc8f7b794d2b2c2b1e64e0205ead5befc294f8111da23bd6a2c799",
+ "Labels": {},
+ "OnBuild": [],
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Tty": false,
+ "User": "",
+ "Volumes": {
+ "/var/lib/registry": {}
+ },
+ "WorkingDir": ""
+ },
+ "Created": "2016-03-08T21:08:15.399680378Z",
+ "DockerVersion": "1.9.1",
+ "GraphDriver": {
+ "Data": null,
+ "Name": "aufs"
+ },
+ "Id": "53773d8552f07b730f3e19979e32499519807d67b344141d965463a950a66e08",
+ "Name": "registry:2",
+ "Os": "linux",
+ "Parent": "f0b1f729f784b755e7bf9c8c2e65d8a0a35a533769c2588f02895f6781ac0805",
+ "RepoDigests": [],
+ "RepoTags": [
+ "registry:2"
+ ],
+ "Size": 0,
+ "VirtualSize": 165808884
+ }
+ ]
+'''
+
+import traceback
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ is_image_name_id,
+ RequestException,
+)
+
+
+class ImageManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(ImageManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.name = self.client.module.params.get('name')
+ self.log("Gathering facts for images: %s" % (str(self.name)))
+
+ if self.name:
+ self.results['images'] = self.get_facts()
+ else:
+ self.results['images'] = self.get_all_images()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def get_facts(self):
+ '''
+ Lookup and inspect each image name found in the names parameter.
+
+ :returns array of image dictionaries
+ '''
+
+ results = []
+
+ names = self.name
+ if not isinstance(names, list):
+ names = [names]
+
+ for name in names:
+ if is_image_name_id(name):
+ self.log('Fetching image %s (ID)' % (name))
+ image = self.client.find_image_by_id(name)
+ else:
+ repository, tag = utils.parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ self.log('Fetching image %s:%s' % (repository, tag))
+ image = self.client.find_image(name=repository, tag=tag)
+ if image:
+ results.append(image)
+ return results
+
+ def get_all_images(self):
+ results = []
+ images = self.client.images()
+ for image in images:
+ try:
+ inspection = self.client.inspect_image(image['Id'])
+ except Exception as exc:
+ self.fail("Error inspecting image %s - %s" % (image['Id'], str(exc)))
+ results.append(inspection)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.20',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ images=[]
+ )
+
+ ImageManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_login.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_login.py
new file mode 100644
index 00000000..03d3f2a1
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_login.py
@@ -0,0 +1,486 @@
+#!/usr/bin/python
+#
+# (c) 2016 Olaf Kilian <olaf.kilian@symanex.com>
+# Chris Houseknecht, <house@redhat.com>
+# James Tanner, <jtanner@redhat.com>
+#
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_login
+short_description: Log into a Docker registry
+description:
+ - Provides functionality similar to the "docker login" command.
+ - Authenticate with a docker registry and add the credentials to your local Docker config file, or to the
+ credentials store associated with the registry. Adding the credentials to the config file or the credential
+ store allows future connections to the registry using tools such as Ansible's Docker modules, the Docker CLI
+ and the Docker SDK for Python without needing to provide credentials.
+ - Running in check mode will perform the authentication without updating the config file.
+options:
+ registry_url:
+ description:
+ - The registry URL.
+ type: str
+ default: "https://index.docker.io/v1/"
+ aliases:
+ - registry
+ - url
+ username:
+ description:
+ - The username for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ password:
+ description:
+ - The plaintext password for the registry account.
+ - Required when I(state) is C(present).
+ type: str
+ email:
+ description:
+ - Does nothing, do not use.
+ - Will be removed in community.docker 2.0.0.
+ type: str
+ reauthorize:
+ description:
+ - Refresh existing authentication found in the configuration file.
+ type: bool
+ default: no
+ aliases:
+ - reauth
+ config_path:
+ description:
+ - Custom path to the Docker CLI configuration file.
+ type: path
+ default: ~/.docker/config.json
+ aliases:
+ - dockercfg_path
+ state:
+ description:
+ - This controls the current state of the user. C(present) will log a user in, C(absent) will log them out.
+ - To log out you only need the registry server, which defaults to DockerHub.
+ - Before 2.1 you could ONLY log in.
+ - Docker does not support 'logout' with a custom config file.
+ type: str
+ default: 'present'
+ choices: ['present', 'absent']
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "L(Python bindings for docker credentials store API) >= 0.2.1
+ (use L(docker-pycreds,https://pypi.org/project/docker-pycreds/) when using Docker SDK for Python < 4.0.0)"
+ - "Docker API >= 1.20"
+author:
+ - Olaf Kilian (@olsaki) <olaf.kilian@symanex.com>
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Log into DockerHub
+ community.docker.docker_login:
+ username: docker
+ password: rekcod
+
+- name: Log into private registry and force re-authorization
+ community.docker.docker_login:
+ registry_url: your.private.registry.io
+ username: yourself
+ password: secrets3
+ reauthorize: yes
+
+- name: Log into DockerHub using a custom config file
+ community.docker.docker_login:
+ username: docker
+ password: rekcod
+ config_path: /tmp/.mydockercfg
+
+- name: Log out of DockerHub
+ community.docker.docker_login:
+ state: absent
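+
+# A minimal sketch (assuming the same private registry as above): logging out
+# of a non-default registry only needs the registry URL.
+- name: Log out of a private registry
+ community.docker.docker_login:
+ registry_url: your.private.registry.io
+ state: absent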
+'''
+
+RETURN = '''
+login_results:
+ description: Results from the login.
+ returned: when state='present'
+ type: dict
+ sample: {
+ "serveraddress": "localhost:5000",
+ "username": "testuser"
+ }
+'''
+
+import base64
+import json
+import os
+import re
+import traceback
+from ansible.module_utils._text import to_bytes, to_text
+
+try:
+ from docker.errors import DockerException
+ from docker import auth
+
+ # Earlier versions of docker/docker-py put decode_auth
+ # in docker.auth.auth instead of docker.auth
+ if hasattr(auth, 'decode_auth'):
+ from docker.auth import decode_auth
+ else:
+ from docker.auth.auth import decode_auth
+
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ HAS_DOCKER_PY,
+ DEFAULT_DOCKER_REGISTRY,
+ DockerBaseClass,
+ EMAIL_REGEX,
+ RequestException,
+)
+
+NEEDS_DOCKER_PYCREDS = False
+
+# Early versions of docker/docker-py rely on docker-pycreds for
+# the credential store api.
+if HAS_DOCKER_PY:
+ try:
+ from docker.credentials.errors import StoreError, CredentialsNotFound
+ from docker.credentials import Store
+ except ImportError:
+ try:
+ from dockerpycreds.errors import StoreError, CredentialsNotFound
+ from dockerpycreds.store import Store
+ except ImportError as exc:
+ HAS_DOCKER_ERROR = str(exc)
+ NEEDS_DOCKER_PYCREDS = True
+
+
+if NEEDS_DOCKER_PYCREDS:
+ # docker-pycreds missing, so we need to create some placeholder classes
+ # to allow instantiation.
+
+ class StoreError(Exception):
+ pass
+
+ class CredentialsNotFound(Exception):
+ pass
+
+
+class DockerFileStore(object):
+ '''
+ A custom credential store class that implements only the functionality we need to
+ update the docker config file when no credential helper is provided.
+ '''
+
+ program = "<legacy config>"
+
+ def __init__(self, config_path):
+ self._config_path = config_path
+
+ # Make sure we have a minimal config if none is available.
+ self._config = dict(
+ auths=dict()
+ )
+
+ try:
+ # Attempt to read the existing config.
+ with open(self._config_path, "r") as f:
+ config = json.load(f)
+ except (ValueError, IOError):
+ # No config found or an invalid config found so we'll ignore it.
+ config = dict()
+
+ # Update our internal config with whatever was loaded.
+ self._config.update(config)
+
+ @property
+ def config_path(self):
+ '''
+ Return the config path configured in this DockerFileStore instance.
+ '''
+
+ return self._config_path
+
+ def get(self, server):
+ '''
+ Retrieve credentials for `server` if there are any in the config file.
+ Otherwise raise a `CredentialsNotFound` error.
+ '''
+
+ server_creds = self._config['auths'].get(server)
+ if not server_creds:
+ raise CredentialsNotFound('No matching credentials')
+
+ (username, password) = decode_auth(server_creds['auth'])
+
+ return dict(
+ Username=username,
+ Secret=password
+ )
+
+ def _write(self):
+ '''
+ Write config back out to disk.
+ '''
+ # Make sure the directory exists
+ directory = os.path.dirname(self._config_path)
+ if not os.path.exists(directory):
+ os.makedirs(directory)
+ # Write config; make sure it has permissions 0o600
+ content = json.dumps(self._config, indent=4, sort_keys=True).encode('utf-8')
+ f = os.open(self._config_path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o600)
+ try:
+ os.write(f, content)
+ finally:
+ os.close(f)
+
+ def store(self, server, username, password):
+ '''
+ Add credentials for `server` to the current configuration.
+ '''
+
+ b64auth = base64.b64encode(
+ to_bytes(username) + b':' + to_bytes(password)
+ )
+ auth = to_text(b64auth)
+
+ # build up the auth structure
+ if 'auths' not in self._config:
+ self._config['auths'] = dict()
+
+ self._config['auths'][server] = dict(
+ auth=auth
+ )
+
+ self._write()
+
+ def erase(self, server):
+ '''
+ Remove credentials for the given server from the configuration.
+ '''
+
+ if 'auths' in self._config and server in self._config['auths']:
+ self._config['auths'].pop(server)
+ self._write()
+
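+# Illustrative sketch (not executed by the module): DockerFileStore mimics the
+# credential-helper Store API on top of a plain config file. The path and
+# values here are hypothetical.
+#
+# store = DockerFileStore('/tmp/config.json')
+# store.store('localhost:5000', 'testuser', 's3cret')
+# store.get('localhost:5000') # -> {'Username': 'testuser', 'Secret': 's3cret'}
+# store.erase('localhost:5000')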
+
+class LoginManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(LoginManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ parameters = self.client.module.params
+ self.check_mode = self.client.check_mode
+
+ self.registry_url = parameters.get('registry_url')
+ self.username = parameters.get('username')
+ self.password = parameters.get('password')
+ self.email = parameters.get('email')
+ self.reauthorize = parameters.get('reauthorize')
+ self.config_path = parameters.get('config_path')
+ self.state = parameters.get('state')
+
+ def run(self):
+ '''
+ Do the actual work of this task here. This allows instantiation for partial
+ testing.
+ '''
+
+ if self.state == 'present':
+ self.login()
+ else:
+ self.logout()
+
+ def fail(self, msg):
+ self.client.fail(msg)
+
+ def login(self):
+ '''
+ Log into the registry with provided username/password. On success update the config
+ file with the new authorization.
+
+ :return: None
+ '''
+
+ if self.email and not re.match(EMAIL_REGEX, self.email):
+ self.fail("Parameter error: the email address appears to be incorrect. Expecting it to match "
+ "/%s/" % (EMAIL_REGEX))
+
+ self.results['actions'].append("Logged into %s" % (self.registry_url))
+ self.log("Log into %s with username %s" % (self.registry_url, self.username))
+ try:
+ response = self.client.login(
+ self.username,
+ password=self.password,
+ email=self.email,
+ registry=self.registry_url,
+ reauth=self.reauthorize,
+ dockercfg_path=self.config_path
+ )
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+
+ # If the user is already logged in, the response contains the stored password for the user
+ if 'password' in response:
+ # The stored (correct) password is returned even if a wrong password was given.
+ # So if the returned password differs from the one we passed, and the user did not
+ # request reauthorization, reauthorize anyway to update the stored credentials.
+ if not self.reauthorize and response['password'] != self.password:
+ try:
+ response = self.client.login(
+ self.username,
+ password=self.password,
+ email=self.email,
+ registry=self.registry_url,
+ reauth=True,
+ dockercfg_path=self.config_path
+ )
+ except Exception as exc:
+ self.fail("Logging into %s for user %s failed - %s" % (self.registry_url, self.username, str(exc)))
+ response.pop('password', None)
+ self.results['login_result'] = response
+
+ self.update_credentials()
+
+ def logout(self):
+ '''
+ Log out of the registry. On success update the config file.
+
+ :return: None
+ '''
+
+ # Get the configuration store.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ current = store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ self.log("Credentials for %s not present, doing nothing." % (self.registry_url))
+ self.results['changed'] = False
+ return
+
+ if not self.check_mode:
+ store.erase(self.registry_url)
+ self.results['changed'] = True
+
+ def update_credentials(self):
+ '''
+ If the authorization is not stored attempt to store authorization values via
+ the appropriate credential helper or to the config file.
+
+ :return: None
+ '''
+
+ # Check to see if credentials already exist.
+ store = self.get_credential_store_instance(self.registry_url, self.config_path)
+
+ try:
+ current = store.get(self.registry_url)
+ except CredentialsNotFound:
+ # get raises an exception on not found.
+ current = dict(
+ Username='',
+ Secret=''
+ )
+
+ if current['Username'] != self.username or current['Secret'] != self.password or self.reauthorize:
+ if not self.check_mode:
+ store.store(self.registry_url, self.username, self.password)
+ self.log("Writing credentials to configured helper %s for %s" % (store.program, self.registry_url))
+ self.results['actions'].append("Wrote credentials to configured helper %s for %s" % (
+ store.program, self.registry_url))
+ self.results['changed'] = True
+
+ def get_credential_store_instance(self, registry, dockercfg_path):
+ '''
+ Return an instance of docker.credentials.Store used by the given registry,
+ or a DockerFileStore instance if no credential helper is configured.
+
+ :return: A Store or DockerFileStore instance
+ :rtype: Union[docker.credentials.Store, DockerFileStore]
+ '''
+
+ # Older versions of docker-py don't have this feature.
+ try:
+ credstore_env = self.client.credstore_env
+ except AttributeError:
+ credstore_env = None
+
+ config = auth.load_config(config_path=dockercfg_path)
+
+ if hasattr(auth, 'get_credential_store'):
+ store_name = auth.get_credential_store(config, registry)
+ elif 'credsStore' in config:
+ store_name = config['credsStore']
+ else:
+ store_name = None
+
+ # Make sure that there is a credential helper before trying to instantiate a
+ # Store object.
+ if store_name:
+ self.log("Found credential store %s" % store_name)
+ return Store(store_name, environment=credstore_env)
+
+ return DockerFileStore(dockercfg_path)
+
+
+def main():
+
+ argument_spec = dict(
+ registry_url=dict(type='str', default=DEFAULT_DOCKER_REGISTRY, aliases=['registry', 'url']),
+ username=dict(type='str'),
+ password=dict(type='str', no_log=True),
+ # Was Ansible 2.14 / community.general 3.0.0:
+ email=dict(type='str', removed_in_version='2.0.0', removed_from_collection='community.docker'),
+ reauthorize=dict(type='bool', default=False, aliases=['reauth']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ config_path=dict(type='path', default='~/.docker/config.json', aliases=['dockercfg_path']),
+ )
+
+ required_if = [
+ ('state', 'present', ['username', 'password']),
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_api_version='1.20',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ actions=[],
+ login_result={}
+ )
+
+ manager = LoginManager(client, results)
+ manager.run()
+
+ if 'actions' in results:
+ del results['actions']
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network.py
new file mode 100644
index 00000000..d2ecf39c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network.py
@@ -0,0 +1,672 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_network
+short_description: Manage Docker networks
+description:
+ - Create/remove Docker networks and connect containers to them.
+ - Performs largely the same function as the "docker network" CLI subcommand.
+options:
+ name:
+ description:
+ - Name of the network to operate on.
+ type: str
+ required: yes
+ aliases:
+ - network_name
+
+ connected:
+ description:
+ - List of container names or container IDs to connect to a network.
+ - Please note that the module only makes sure that these containers are connected to the network,
+ but does not care about connection options. If you rely on specific IP addresses etc., use the
+ M(community.docker.docker_container) module to ensure your containers are correctly connected to this network.
+ type: list
+ elements: str
+ aliases:
+ - containers
+
+ driver:
+ description:
+ - Specify the type of network. Docker provides bridge and overlay drivers, but 3rd party drivers can also be used.
+ type: str
+ default: bridge
+
+ driver_options:
+ description:
+ - Dictionary of network settings. Consult docker docs for valid options and values.
+ type: dict
+
+ force:
+ description:
+ - With state C(absent) forces disconnecting all containers from the
+ network prior to deleting the network. With state C(present) will
+ disconnect all containers, delete the network and re-create the
+ network.
+ - This option is required if you have changed the IPAM or driver options
+ and want an existing network to be updated to use the new options.
+ type: bool
+ default: no
+
+ appends:
+ description:
+ - By default the connected list is canonical, meaning containers not on the list are removed from the network.
+ - Use I(appends) to leave existing containers connected.
+ type: bool
+ default: no
+ aliases:
+ - incremental
+
+ enable_ipv6:
+ description:
+ - Enable IPv6 networking.
+ type: bool
+
+ ipam_driver:
+ description:
+ - Specify an IPAM driver.
+ type: str
+
+ ipam_driver_options:
+ description:
+ - Dictionary of IPAM driver options.
+ type: dict
+
+ ipam_config:
+ description:
+ - List of IPAM config blocks. Consult
+ L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values.
+ Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python).
+ type: list
+ elements: dict
+ suboptions:
+ subnet:
+ description:
+ - IP subset in CIDR notation.
+ type: str
+ iprange:
+ description:
+ - IP address range in CIDR notation.
+ type: str
+ gateway:
+ description:
+ - IP gateway address.
+ type: str
+ aux_addresses:
+ description:
+ - Auxiliary IP addresses used by Network driver, as a mapping from hostname to IP.
+ type: dict
+
+ state:
+ description:
+ - C(absent) deletes the network. If a network has connected containers, it
+ cannot be deleted. Use the I(force) option to disconnect all containers
+ and delete the network.
+ - C(present) creates the network, if it does not already exist with the
+ specified parameters, and connects the list of containers provided via
+ the connected parameter. Containers not on the list will be disconnected.
+ An empty list will leave no containers connected to the network. Use the
+ I(appends) option to leave existing containers connected. Use the I(force)
+ options to force re-creation of the network.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+ internal:
+ description:
+ - Restrict external access to the network.
+ type: bool
+
+ labels:
+ description:
+ - Dictionary of labels.
+ type: dict
+
+ scope:
+ description:
+ - Specify the network's scope.
+ type: str
+ choices:
+ - local
+ - global
+ - swarm
+
+ attachable:
+ description:
+ - If enabled, and the network is in the global scope, non-service containers on worker nodes will be able to connect to the network.
+ type: bool
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+notes:
+ - When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the
+ network. It does not try to reconnect containers, except the ones listed in I(connected), and even for these, it does not consider specific
+ connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the
+ network, loop over your containers with the M(community.docker.docker_container) module to make sure they are connected properly.
+ - The module does not support Docker Swarm, i.e. it will not try to disconnect or reconnect services. If services are connected to the
+ network, deleting the network will fail. When network options are changed, the network has to be deleted and recreated, so this will
+ fail as well.
+
+author:
+ - "Ben Keith (@keitwb)"
+ - "Chris Houseknecht (@chouseknecht)"
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "The docker server >= 1.10.0"
+'''
+
+EXAMPLES = '''
+- name: Create a network
+ community.docker.docker_network:
+ name: network_one
+
+- name: Remove all but selected list of containers
+ community.docker.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ - container_b
+ - container_c
+
+- name: Remove a single container
+ community.docker.docker_network:
+ name: network_one
+ connected: "{{ fulllist|difference(['container_a']) }}"
+
+- name: Add a container to a network, leaving existing containers connected
+ community.docker.docker_network:
+ name: network_one
+ connected:
+ - container_a
+ appends: yes
+
+- name: Create a network with driver options
+ community.docker.docker_network:
+ name: network_two
+ driver_options:
+ com.docker.network.bridge.name: net2
+
+- name: Create a network with custom IPAM config
+ community.docker.docker_network:
+ name: network_three
+ ipam_config:
+ - subnet: 172.3.27.0/24
+ gateway: 172.3.27.2
+ iprange: 172.3.27.0/26
+ aux_addresses:
+ host1: 172.3.27.3
+ host2: 172.3.27.4
+
+- name: Create a network with labels
+ community.docker.docker_network:
+ name: network_four
+ labels:
+ key1: value1
+ key2: value2
+
+- name: Create a network with IPv6 IPAM config
+ community.docker.docker_network:
+ name: network_ipv6_one
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: fdd1:ac8c:0557:7ce1::/64
+
+- name: Create a network with IPv6 and custom IPv4 IPAM config
+ community.docker.docker_network:
+ name: network_ipv6_two
+ enable_ipv6: yes
+ ipam_config:
+ - subnet: 172.4.27.0/24
+ - subnet: fdd1:ac8c:0557:7ce2::/64
+
+- name: Delete a network, disconnecting all containers
+ community.docker.docker_network:
+ name: network_one
+ state: absent
+ force: yes
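+
+# A sketch of updating an existing network's options (the bridge name below is
+# hypothetical): I(force) is required when driver or IPAM options change.
+- name: Update driver options of an existing network, forcing re-creation
+ community.docker.docker_network:
+ name: network_two
+ driver_options:
+ com.docker.network.bridge.name: net2new
+ force: yes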
+'''
+
+RETURN = '''
+network:
+ description:
+ - Network inspection results for the affected network.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import re
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ docker_version,
+ DifferenceTracker,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+try:
+ from docker import utils
+ from docker.errors import DockerException
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ from docker.types import IPAMPool, IPAMConfig
+except Exception:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.name = None
+ self.connected = None
+ self.driver = None
+ self.driver_options = None
+ self.ipam_driver = None
+ self.ipam_driver_options = None
+ self.ipam_config = None
+ self.appends = None
+ self.force = None
+ self.internal = None
+ self.labels = None
+ self.debug = None
+ self.enable_ipv6 = None
+ self.scope = None
+ self.attachable = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+def container_names_in_network(network):
+ return [c['Name'] for c in network['Containers'].values()] if network['Containers'] else []
+
+
+CIDR_IPV4 = re.compile(r'^([0-9]{1,3}\.){3}[0-9]{1,3}/([0-9]|[1-2][0-9]|3[0-2])$')
+CIDR_IPV6 = re.compile(r'^[0-9a-fA-F:]+/([0-9]|[1-9][0-9]|1[01][0-9]|12[0-8])$')
+
+
+def validate_cidr(cidr):
+ """Validate CIDR. Return IP version of a CIDR string on success.
+
+ :param cidr: Valid CIDR
+ :type cidr: str
+ :return: ``ipv4`` or ``ipv6``
+ :rtype: str
+ :raises ValueError: If ``cidr`` is not a valid CIDR
+ """
+ if CIDR_IPV4.match(cidr):
+ return 'ipv4'
+ elif CIDR_IPV6.match(cidr):
+ return 'ipv6'
+ raise ValueError('"{0}" is not a valid CIDR'.format(cidr))
+
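+# For example (illustrative):
+# validate_cidr('192.168.0.0/24') # -> 'ipv4'
+# validate_cidr('fdd1:ac8c:0557:7ce1::/64') # -> 'ipv6'
+# validate_cidr('192.168.0.0') # raises ValueError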
+
+def normalize_ipam_config_key(key):
+ """Normalizes IPAM config keys returned by Docker API to match Ansible keys.
+
+ :param key: Docker API key
+ :type key: str
+ :return: Ansible module key
+ :rtype: str
+ """
+ special_cases = {
+ 'AuxiliaryAddresses': 'aux_addresses'
+ }
+ return special_cases.get(key, key.lower())
+
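+# For example (illustrative): 'AuxiliaryAddresses' -> 'aux_addresses',
+# 'Subnet' -> 'subnet', 'IPRange' -> 'iprange'.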
+
+def dicts_are_essentially_equal(a, b):
+ """Make sure that a is a subset of b, where None entries of a are ignored."""
+ for k, v in a.items():
+ if v is None:
+ continue
+ if b.get(k) != v:
+ return False
+ return True
+
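+# For example (illustrative): with a={'subnet': '10.0.0.0/24', 'gateway': None}
+# and b={'subnet': '10.0.0.0/24', 'gateway': '10.0.0.1'}, a is essentially
+# equal to b, since the None entry in a is ignored.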
+
+class DockerNetworkManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_network = self.get_existing_network()
+
+ if not self.parameters.connected and self.existing_network:
+ self.parameters.connected = container_names_in_network(self.existing_network)
+
+ if self.parameters.ipam_config:
+ try:
+ for ipam_config in self.parameters.ipam_config:
+ validate_cidr(ipam_config['subnet'])
+ except ValueError as e:
+ self.client.fail(str(e))
+
+ if self.parameters.driver_options:
+ self.parameters.driver_options = clean_dict_booleans_for_docker_api(self.parameters.driver_options)
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_network(self):
+ return self.client.get_network(name=self.parameters.name)
+
+ def has_different_config(self, net):
+ '''
+ Evaluates an existing network and returns a tuple containing a boolean
+ indicating if the configuration is different and a list of differences.
+
+ :param net: the inspection output for an existing network
+ :return: (bool, list)
+ '''
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != net['Driver']:
+ differences.add('driver',
+ parameter=self.parameters.driver,
+ active=net['Driver'])
+ if self.parameters.driver_options:
+ if not net.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=net.get('Options'))
+ else:
+ for key, value in self.parameters.driver_options.items():
+ if not (key in net['Options']) or value != net['Options'][key]:
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=net['Options'].get(key))
+
+ if self.parameters.ipam_driver:
+ if not net.get('IPAM') or net['IPAM']['Driver'] != self.parameters.ipam_driver:
+ differences.add('ipam_driver',
+ parameter=self.parameters.ipam_driver,
+ active=net.get('IPAM'))
+
+ if self.parameters.ipam_driver_options is not None:
+ ipam_driver_options = net['IPAM'].get('Options') or {}
+ if ipam_driver_options != self.parameters.ipam_driver_options:
+ differences.add('ipam_driver_options',
+ parameter=self.parameters.ipam_driver_options,
+ active=ipam_driver_options)
+
+ if self.parameters.ipam_config:
+ if not net.get('IPAM') or not net['IPAM']['Config']:
+ differences.add('ipam_config',
+ parameter=self.parameters.ipam_config,
+ active=net.get('IPAM', {}).get('Config'))
+ else:
+ # Put network's IPAM config into the same format as module's IPAM config
+ net_ipam_configs = []
+ for net_ipam_config in net['IPAM']['Config']:
+ config = dict()
+ for k, v in net_ipam_config.items():
+ config[normalize_ipam_config_key(k)] = v
+ net_ipam_configs.append(config)
+ # Compare lists of dicts as sets of dicts
+ for idx, ipam_config in enumerate(self.parameters.ipam_config):
+ net_config = dict()
+ for net_ipam_config in net_ipam_configs:
+ if dicts_are_essentially_equal(ipam_config, net_ipam_config):
+ net_config = net_ipam_config
+ break
+ for key, value in ipam_config.items():
+ if value is None:
+ # due to recursive argument_spec, all keys are always present
+ # (but have default value None if not specified)
+ continue
+ if value != net_config.get(key):
+ differences.add('ipam_config[%s].%s' % (idx, key),
+ parameter=value,
+ active=net_config.get(key))
+
+ if self.parameters.enable_ipv6 is not None and self.parameters.enable_ipv6 != net.get('EnableIPv6', False):
+ differences.add('enable_ipv6',
+ parameter=self.parameters.enable_ipv6,
+ active=net.get('EnableIPv6', False))
+
+ if self.parameters.internal is not None and self.parameters.internal != net.get('Internal', False):
+ differences.add('internal',
+ parameter=self.parameters.internal,
+ active=net.get('Internal'))
+
+ if self.parameters.scope is not None and self.parameters.scope != net.get('Scope'):
+ differences.add('scope',
+ parameter=self.parameters.scope,
+ active=net.get('Scope'))
+
+ if self.parameters.attachable is not None and self.parameters.attachable != net.get('Attachable', False):
+ differences.add('attachable',
+ parameter=self.parameters.attachable,
+ active=net.get('Attachable'))
+ if self.parameters.labels:
+ if not net.get('Labels'):
+ differences.add('labels',
+ parameter=self.parameters.labels,
+ active=net.get('Labels'))
+ else:
+ for key, value in self.parameters.labels.items():
+ if not (key in net['Labels']) or value != net['Labels'][key]:
+ differences.add('labels.%s' % key,
+ parameter=value,
+ active=net['Labels'].get(key))
+
+ return not differences.empty, differences
+
+ def create_network(self):
+ if not self.existing_network:
+ params = dict(
+ driver=self.parameters.driver,
+ options=self.parameters.driver_options,
+ )
+
+ ipam_pools = []
+ if self.parameters.ipam_config:
+ for ipam_pool in self.parameters.ipam_config:
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ ipam_pools.append(IPAMPool(**ipam_pool))
+ else:
+ ipam_pools.append(utils.create_ipam_pool(**ipam_pool))
+
+ if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
+ # Only add ipam parameter if a driver was specified or if IPAM parameters
+ # were specified. Leaving this parameter away can significantly speed up
+ # creation; on my machine creation with this option needs ~15 seconds,
+ # and without just a few seconds.
+ if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
+ params['ipam'] = IPAMConfig(driver=self.parameters.ipam_driver,
+ pool_configs=ipam_pools,
+ options=self.parameters.ipam_driver_options)
+ else:
+ params['ipam'] = utils.create_ipam_config(driver=self.parameters.ipam_driver,
+ pool_configs=ipam_pools)
+
+ if self.parameters.enable_ipv6 is not None:
+ params['enable_ipv6'] = self.parameters.enable_ipv6
+ if self.parameters.internal is not None:
+ params['internal'] = self.parameters.internal
+ if self.parameters.scope is not None:
+ params['scope'] = self.parameters.scope
+ if self.parameters.attachable is not None:
+ params['attachable'] = self.parameters.attachable
+ if self.parameters.labels:
+ params['labels'] = self.parameters.labels
+
+ if not self.check_mode:
+ resp = self.client.create_network(self.parameters.name, **params)
+ self.client.report_warnings(resp, ['Warning'])
+ self.existing_network = self.client.get_network(network_id=resp['Id'])
+ self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_network(self):
+ if self.existing_network:
+ self.disconnect_all_containers()
+ if not self.check_mode:
+ self.client.remove_network(self.parameters.name)
+ self.results['actions'].append("Removed network %s" % (self.parameters.name,))
+ self.results['changed'] = True
+
+ def is_container_connected(self, container_name):
+ if not self.existing_network:
+ return False
+ return container_name in container_names_in_network(self.existing_network)
+
+ def connect_containers(self):
+ for name in self.parameters.connected:
+ if not self.is_container_connected(name):
+ if not self.check_mode:
+ self.client.connect_container_to_network(name, self.parameters.name)
+ self.results['actions'].append("Connected container %s" % (name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(name),
+ parameter=True,
+ active=False)
+
+ def disconnect_missing(self):
+ if not self.existing_network:
+ return
+ containers = self.existing_network['Containers']
+ if not containers:
+ return
+ for c in containers.values():
+ name = c['Name']
+ if name not in self.parameters.connected:
+ self.disconnect_container(name)
+
+ def disconnect_all_containers(self):
+ containers = self.client.get_network(name=self.parameters.name)['Containers']
+ if not containers:
+ return
+ for cont in containers.values():
+ self.disconnect_container(cont['Name'])
+
+ def disconnect_container(self, container_name):
+ if not self.check_mode:
+ self.client.disconnect_container_from_network(container_name, self.parameters.name)
+ self.results['actions'].append("Disconnected container %s" % (container_name,))
+ self.results['changed'] = True
+ self.diff_tracker.add('connected.{0}'.format(container_name),
+ parameter=False,
+ active=True)
+
+ def present(self):
+ different = False
+ differences = DifferenceTracker()
+ if self.existing_network:
+ different, differences = self.has_different_config(self.existing_network)
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_network is not None)
+ if self.parameters.force or different:
+ self.remove_network()
+ self.existing_network = None
+
+ self.create_network()
+ self.connect_containers()
+ if not self.parameters.appends:
+ self.disconnect_missing()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ network_facts = self.get_existing_network()
+ self.results['network'] = network_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_network is not None)
+ self.remove_network()
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['network_name']),
+ connected=dict(type='list', default=[], elements='str', aliases=['containers']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='bridge'),
+ driver_options=dict(type='dict', default={}),
+ force=dict(type='bool', default=False),
+ appends=dict(type='bool', default=False, aliases=['incremental']),
+ ipam_driver=dict(type='str'),
+ ipam_driver_options=dict(type='dict'),
+ ipam_config=dict(type='list', elements='dict', options=dict(
+ subnet=dict(type='str'),
+ iprange=dict(type='str'),
+ gateway=dict(type='str'),
+ aux_addresses=dict(type='dict'),
+ )),
+ enable_ipv6=dict(type='bool'),
+ internal=dict(type='bool'),
+ labels=dict(type='dict', default={}),
+ debug=dict(type='bool', default=False),
+ scope=dict(type='str', choices=['local', 'global', 'swarm']),
+ attachable=dict(type='bool'),
+ )
+
+ option_minimal_versions = dict(
+ scope=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ attachable=dict(docker_py_version='2.0.0', docker_api_version='1.26'),
+ labels=dict(docker_api_version='1.23'),
+ ipam_driver_options=dict(docker_py_version='2.0.0'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.22',
+ # "The docker server >= 1.10.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerNetworkManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network_info.py
new file mode 100644
index 00000000..491ebf8b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_network_info.py
@@ -0,0 +1,141 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_network_info
+
+short_description: Retrieves facts about a docker network
+
+description:
+ - Retrieves facts about a docker network.
+ - Essentially returns the output of C(docker network inspect <name>), similar to what M(community.docker.docker_network)
+ returns for a non-absent network.
+
+
+options:
+ name:
+ description:
+ - The name of the network to inspect.
+ - When identifying an existing network, the name may be a network name or a long or short network ID.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+ - "Dave Bendit (@DBendit)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on network
+ community.docker.docker_network_info:
+ name: mydata
+ register: result
+
+- name: Does network exist?
+ ansible.builtin.debug:
+ msg: "The network {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about network
+ ansible.builtin.debug:
+ var: result.network
+ when: result.exists
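+
+# A sketch, assuming the short ID below refers to an existing network:
+# I(name) also accepts a long or short network ID.
+- name: Get info on network by short ID
+ community.docker.docker_network_info:
+ name: 0856968545f2
+ register: result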
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the network exists.
+ type: bool
+ returned: always
+ sample: true
+network:
+ description:
+ - Facts representing the current state of the network. Matches the docker inspection output.
+ - Will be C(none) if network does not exist.
+ returned: always
+ type: dict
+ sample: '{
+ "Attachable": false,
+ "ConfigFrom": {
+ "Network": ""
+ },
+ "ConfigOnly": false,
+ "Containers": {},
+ "Created": "2018-12-07T01:47:51.250835114-06:00",
+ "Driver": "bridge",
+ "EnableIPv6": false,
+ "IPAM": {
+ "Config": [
+ {
+ "Gateway": "192.168.96.1",
+ "Subnet": "192.168.96.0/20"
+ }
+ ],
+ "Driver": "default",
+ "Options": null
+ },
+ "Id": "0856968545f22026c41c2c7c3d448319d3b4a6a03a40b148b3ac4031696d1c0a",
+ "Ingress": false,
+ "Internal": false,
+ "Labels": {},
+ "Name": "ansible-test-f2700bba",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_api_version='1.21',
+ )
+
+ try:
+ network = client.get_network(client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=(True if network else False),
+ network=network,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node.py
new file mode 100644
index 00000000..d73b2d70
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node.py
@@ -0,0 +1,294 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_node
+short_description: Manage Docker Swarm node
+description:
+ - Manages the Docker nodes via Swarm Manager.
+ - This module allows changing the node's role and availability, and modifying, adding or removing node labels.
+options:
+ hostname:
+ description:
+ - The hostname or ID of node as registered in Swarm.
+ - If more than one node is registered using the same hostname, the ID must be used,
+ otherwise the module will fail.
+ type: str
+ required: yes
+ labels:
+ description:
+ - User-defined key/value metadata that will be assigned as node attribute.
+ - Label operations in this module apply to the docker swarm node specified by I(hostname).
+ Use M(community.docker.docker_swarm) module to add/modify/remove swarm cluster labels.
+ - The actual state of labels assigned to the node when module completes its work depends on
+ I(labels_state) and I(labels_to_remove) parameters values. See description below.
+ type: dict
+ labels_state:
+ description:
+ - Defines the operation on the labels assigned to the node and the labels specified in the I(labels) option.
+ - Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node.
+ If no labels are assigned then it will add listed labels. For labels that are already assigned
+ to the node, it will update their values. The labels not specified in I(labels) will remain unchanged.
+ If I(labels) is empty then no changes will be made.
+ - Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then
+ all labels assigned to the node will be removed.
+ type: str
+ default: 'merge'
+ choices:
+ - merge
+ - replace
+ labels_to_remove:
+ description:
+ - List of labels that will be removed from the node configuration. The list has to contain only label
+ names, not their values.
+ - If the label provided on the list is not assigned to the node, the entry is ignored.
+ - If the label is both in I(labels_to_remove) and I(labels), then the value provided in I(labels) remains
+ assigned to the node.
+ - If I(labels_state) is C(replace) and I(labels) is not provided or empty then all labels assigned to
+ node are removed and I(labels_to_remove) is ignored.
+ type: list
+ elements: str
+ availability:
+ description: Node availability to assign. If not provided then node availability remains unchanged.
+ choices:
+ - active
+ - pause
+ - drain
+ type: str
+ role:
+ description: Node role to assign. If not provided then node role remains unchanged.
+ choices:
+ - manager
+ - worker
+ type: str
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - Docker API >= 1.25
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+ - Thierry Bouvet (@tbouvet)
+
+'''
+
+EXAMPLES = '''
+- name: Set node role
+ community.docker.docker_node:
+ hostname: mynode
+ role: manager
+
+- name: Set node availability
+ community.docker.docker_node:
+ hostname: mynode
+ availability: drain
+
+- name: Replace node labels with new labels
+ community.docker.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+ labels_state: replace
+
+- name: Merge node labels and new labels
+ community.docker.docker_node:
+ hostname: mynode
+ labels:
+ key: value
+
+- name: Remove all labels assigned to node
+ community.docker.docker_node:
+ hostname: mynode
+ labels_state: replace
+
+- name: Remove selected labels from the node
+ community.docker.docker_node:
+ hostname: mynode
+ labels_to_remove:
+ - key1
+ - key2
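+
+# A sketch combining both label operations (label names are hypothetical):
+# key1 keeps the value from I(labels), while key2 is removed.
+- name: Update one label and remove another in a single task
+ community.docker.docker_node:
+ hostname: mynode
+ labels:
+ key1: newvalue
+ labels_to_remove:
+ - key2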
+'''
+
+RETURN = '''
+node:
+ description: Information about the node after the 'update' operation.
+ returned: success
+ type: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ DockerBaseClass,
+ RequestException,
+)
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+
+ # Spec
+ self.name = None
+ self.labels = None
+ self.labels_state = None
+ self.labels_to_remove = None
+
+ # Node
+ self.availability = None
+ self.role = None
+
+ for key, value in client.module.params.items():
+ setattr(self, key, value)
+
+
+class SwarmNodeManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmNodeManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.parameters = TaskParameters(client)
+
+ self.node_update()
+
+ def node_update(self):
+ if not self.client.check_if_swarm_node(node_id=self.parameters.hostname):
+ self.client.fail("This node is not part of a swarm.")
+ return
+
+ if self.client.check_if_swarm_node_is_down():
+ self.client.fail("Can not update the node. The node is down.")
+
+ try:
+ node_info = self.client.inspect_node(node_id=self.parameters.hostname)
+ except APIError as exc:
+ self.client.fail("Failed to get node information for %s" % to_native(exc))
+
+ changed = False
+ node_spec = dict(
+ Availability=self.parameters.availability,
+ Role=self.parameters.role,
+ Labels=self.parameters.labels,
+ )
+
+ if self.parameters.role is None:
+ node_spec['Role'] = node_info['Spec']['Role']
+ else:
+ if node_info['Spec']['Role'] != self.parameters.role:
+ node_spec['Role'] = self.parameters.role
+ changed = True
+
+ if self.parameters.availability is None:
+ node_spec['Availability'] = node_info['Spec']['Availability']
+ else:
+ if node_info['Spec']['Availability'] != self.parameters.availability:
+ node_spec['Availability'] = self.parameters.availability
+ changed = True
+
+ if self.parameters.labels_state == 'replace':
+ if self.parameters.labels is None:
+ node_spec['Labels'] = {}
+ if node_info['Spec']['Labels']:
+ changed = True
+ else:
+ if (node_info['Spec']['Labels'] or {}) != self.parameters.labels:
+ node_spec['Labels'] = self.parameters.labels
+ changed = True
+ elif self.parameters.labels_state == 'merge':
+ node_spec['Labels'] = dict(node_info['Spec']['Labels'] or {})
+ if self.parameters.labels is not None:
+ for key, value in self.parameters.labels.items():
+ if node_spec['Labels'].get(key) != value:
+ node_spec['Labels'][key] = value
+ changed = True
+
+ if self.parameters.labels_to_remove is not None:
+ for key in self.parameters.labels_to_remove:
+ if self.parameters.labels is not None:
+ if not self.parameters.labels.get(key):
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+ else:
+ self.client.module.warn(
+ "Label '%s' listed both in 'labels' and 'labels_to_remove'. "
+ "Keeping the assigned label value."
+ % to_native(key))
+ else:
+ if node_spec['Labels'].get(key):
+ node_spec['Labels'].pop(key)
+ changed = True
+
+ if changed is True:
+ if not self.check_mode:
+ try:
+ self.client.update_node(node_id=node_info['ID'], version=node_info['Version']['Index'],
+ node_spec=node_spec)
+ except APIError as exc:
+ self.client.fail("Failed to update node : %s" % to_native(exc))
+ self.results['node'] = self.client.get_node_inspect(node_id=node_info['ID'])
+ self.results['changed'] = changed
+ else:
+ self.results['node'] = node_info
+ self.results['changed'] = changed
+
+
+def main():
+ argument_spec = dict(
+ hostname=dict(type='str', required=True),
+ labels=dict(type='dict'),
+ labels_state=dict(type='str', default='merge', choices=['merge', 'replace']),
+ labels_to_remove=dict(type='list', elements='str'),
+ availability=dict(type='str', choices=['active', 'pause', 'drain']),
+ role=dict(type='str', choices=['worker', 'manager']),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ min_docker_api_version='1.25',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ SwarmNodeManager(client, results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node_info.py
new file mode 100644
index 00000000..d541588c
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_node_info.py
@@ -0,0 +1,156 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_node_info
+
+short_description: Retrieves facts about a docker swarm node from the Swarm Manager
+
+description:
+ - Retrieves facts about a docker node.
+ - Essentially returns the output of C(docker node inspect <name>).
+ - Must be executed on a host running as Swarm Manager, otherwise the module will fail.
+
+
+options:
+ name:
+ description:
+ - The name of the node to inspect, or a list of node names to inspect.
+ - If empty then return information on all nodes in the Swarm cluster.
+ - When identifying the node use either the hostname of the node (as registered in Swarm) or node ID.
+ - If I(self) is C(true) then this parameter is ignored.
+ type: list
+ elements: str
+ self:
+ description:
+ - If C(true), queries the node (i.e. the docker daemon) the module communicates with.
+ - If C(true) then I(name) is ignored.
+ - If C(false) then query depends on I(name) presence and value.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.4.0"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on all nodes
+ community.docker.docker_node_info:
+ register: result
+
+- name: Get info on node
+ community.docker.docker_node_info:
+ name: mynode
+ register: result
+
+- name: Get info on list of nodes
+ community.docker.docker_node_info:
+ name:
+ - mynode1
+ - mynode2
+ register: result
+
+- name: Get info on host if it is Swarm Manager
+ community.docker.docker_node_info:
+ self: true
+ register: result
+'''
+
+RETURN = '''
+nodes:
+ description:
+ - Facts representing the current state of the nodes. Matches the C(docker node inspect) output.
+ - Can contain multiple entries if more than one node is provided in I(name), or I(name) is not provided.
+ - If I(name) contains a list of nodes, the output will provide information on all nodes registered
+ at the swarm, including nodes that left the swarm but haven't been removed from the cluster on swarm
+ managers and nodes that are unreachable.
+ returned: always
+ type: list
+ elements: dict
+'''
+
+import traceback
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ RequestException,
+)
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def get_node_facts(client):
+
+ results = []
+
+ if client.module.params['self'] is True:
+ self_node_id = client.get_swarm_node_id()
+ node_info = client.get_node_inspect(node_id=self_node_id)
+ results.append(node_info)
+ return results
+
+ if client.module.params['name'] is None:
+ node_info = client.get_all_nodes_inspect()
+ return node_info
+
+ nodes = client.module.params['name']
+ if not isinstance(nodes, list):
+ nodes = [nodes]
+
+ for next_node_name in nodes:
+ next_node_info = client.get_node_inspect(node_id=next_node_name, skip_missing=True)
+ if next_node_info:
+ results.append(next_node_info)
+ return results
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='list', elements='str'),
+ self=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.4.0',
+ min_docker_api_version='1.24',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ nodes = get_node_facts(client)
+
+ client.module.exit_json(
+ changed=False,
+ nodes=nodes,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_prune.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_prune.py
new file mode 100644
index 00000000..8fcb4094
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_prune.py
@@ -0,0 +1,265 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_prune
+
+short_description: Allows pruning various docker objects
+
+description:
+ - Allows running C(docker container prune), C(docker image prune), C(docker network prune)
+ and C(docker volume prune) via the Docker API.
+
+
+options:
+ containers:
+ description:
+ - Whether to prune containers.
+ type: bool
+ default: no
+ containers_filters:
+ description:
+ - A dictionary of filter values used for selecting containers to delete.
+ - "For example, C(until: 24h)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/container_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ images:
+ description:
+ - Whether to prune images.
+ type: bool
+ default: no
+ images_filters:
+ description:
+ - A dictionary of filter values used for selecting images to delete.
+ - "For example, C(dangling: true)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/image_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ networks:
+ description:
+ - Whether to prune networks.
+ type: bool
+ default: no
+ networks_filters:
+ description:
+ - A dictionary of filter values used for selecting networks to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/network_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ volumes:
+ description:
+ - Whether to prune volumes.
+ type: bool
+ default: no
+ volumes_filters:
+ description:
+ - A dictionary of filter values used for selecting volumes to delete.
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/volume_prune/#filtering)
+ for more information on possible filters.
+ type: dict
+ builder_cache:
+ description:
+ - Whether to prune the builder cache.
+ - Requires version 3.3.0 of the Docker SDK for Python or newer.
+ type: bool
+ default: no
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_2_documentation
+
+
+author:
+ - "Felix Fontein (@felixfontein)"
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+'''
+
+EXAMPLES = '''
+- name: Prune containers older than 24h
+ community.docker.docker_prune:
+ containers: yes
+ containers_filters:
+ # only consider containers created more than 24 hours ago
+ until: 24h
+
+- name: Prune everything
+ community.docker.docker_prune:
+ containers: yes
+ images: yes
+ networks: yes
+ volumes: yes
+ builder_cache: yes
+
+- name: Prune everything (including non-dangling images)
+ community.docker.docker_prune:
+ containers: yes
+ images: yes
+ images_filters:
+ dangling: false
+ networks: yes
+ volumes: yes
+ builder_cache: yes
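+
+# A sketch using a label filter (the label itself is hypothetical); filter
+# names mirror those of the docker CLI prune commands.
+- name: Prune volumes that carry a specific label
+ community.docker.docker_prune:
+ volumes: yes
+ volumes_filters:
+ label: temporary=true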
+'''
+
+RETURN = '''
+# containers
+containers:
+ description:
+ - List of IDs of deleted containers.
+ returned: I(containers) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+containers_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from container pruning in bytes.
+ returned: I(containers) is C(true)
+ type: int
+ sample: '0'
+
+# images
+images:
+ description:
+ - List of IDs of deleted images.
+ returned: I(images) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+images_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from image pruning in bytes.
+ returned: I(images) is C(true)
+ type: int
+ sample: '0'
+
+# networks
+networks:
+ description:
+ - List of IDs of deleted networks.
+ returned: I(networks) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+
+# volumes
+volumes:
+ description:
+ - List of IDs of deleted volumes.
+ returned: I(volumes) is C(true)
+ type: list
+ elements: str
+ sample: '[]'
+volumes_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from volumes pruning in bytes.
+ returned: I(volumes) is C(true)
+ type: int
+ sample: '0'
+
+# builder_cache
+builder_cache_space_reclaimed:
+ description:
+ - Amount of reclaimed disk space from builder cache pruning in bytes.
+ returned: I(builder_cache) is C(true)
+ type: int
+ sample: '0'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+try:
+ from ansible_collections.community.docker.plugins.module_utils.common import docker_version, clean_dict_booleans_for_docker_api
+except Exception as dummy:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+
+def main():
+ argument_spec = dict(
+ containers=dict(type='bool', default=False),
+ containers_filters=dict(type='dict'),
+ images=dict(type='bool', default=False),
+ images_filters=dict(type='dict'),
+ networks=dict(type='bool', default=False),
+ networks_filters=dict(type='dict'),
+ volumes=dict(type='bool', default=False),
+ volumes_filters=dict(type='dict'),
+ builder_cache=dict(type='bool', default=False),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ # supports_check_mode=True,
+ min_docker_api_version='1.25',
+ min_docker_version='2.1.0',
+ )
+
+ # Version checks
+ cache_min_version = '3.3.0'
+ if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version):
+        msg = "Error: Docker SDK for Python's version is %s. Minimum version required for the builder_cache option is %s. Use `pip install --upgrade docker` to upgrade."
+ client.fail(msg % (docker_version, cache_min_version))
+
+ try:
+ result = dict()
+
+ if client.module.params['containers']:
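+            # Filter values may be booleans; the Docker API expects the
+            # strings 'true'/'false', so normalize them before passing them on.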
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
+ res = client.prune_containers(filters=filters)
+ result['containers'] = res.get('ContainersDeleted') or []
+ result['containers_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['images']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
+ res = client.prune_images(filters=filters)
+ result['images'] = res.get('ImagesDeleted') or []
+ result['images_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['networks']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
+ res = client.prune_networks(filters=filters)
+ result['networks'] = res.get('NetworksDeleted') or []
+
+ if client.module.params['volumes']:
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
+ res = client.prune_volumes(filters=filters)
+ result['volumes'] = res.get('VolumesDeleted') or []
+ result['volumes_space_reclaimed'] = res['SpaceReclaimed']
+
+ if client.module.params['builder_cache']:
+ res = client.prune_builds()
+ result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']
+
+ client.module.exit_json(**result)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_secret.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_secret.py
new file mode 100644
index 00000000..57df6e66
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_secret.py
@@ -0,0 +1,302 @@
+#!/usr/bin/python
+#
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_secret
+
+short_description: Manage docker secrets
+
+
+description:
+ - Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm).
+  - Adds to the metadata of new secrets an C(ansible_key) label containing a hash of the secret data, which is then
+    used in future runs to test if a secret has changed. If C(ansible_key) is not present, the secret will not be
+    updated unless the I(force) option is set.
+ - Updates to secrets are performed by removing the secret and creating it again.
+options:
+ data:
+ description:
+ - The value of the secret. Required when state is C(present).
+ type: str
+ data_is_b64:
+ description:
+ - If set to C(true), the data is assumed to be Base64 encoded and will be
+ decoded before being used.
+ - To use binary I(data), it is better to keep it Base64 encoded and let it
+ be decoded by this option.
+ type: bool
+ default: no
+ labels:
+ description:
+ - "A map of key:value meta data, where both key and value are expected to be strings."
+ - If new meta data is provided, or existing meta data is modified, the secret will be updated by removing it and creating it again.
+ type: dict
+ force:
+ description:
+ - Use with state C(present) to always remove and recreate an existing secret.
+ - If C(true), an existing secret will be replaced, even if it has not changed.
+ type: bool
+ default: no
+ name:
+ description:
+ - The name of the secret.
+ type: str
+ required: yes
+ state:
+ description:
+      - Set to C(present) if the secret should exist, or C(absent) if it should not.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_2_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.1.0"
+ - "Docker API >= 1.25"
+
+author:
+ - Chris Houseknecht (@chouseknecht)
+'''
+
+EXAMPLES = '''
+
+- name: Create secret foo (from a file on the control machine)
+ community.docker.docker_secret:
+ name: foo
+ # If the file is JSON or binary, Ansible might modify it (because
+ # it is first decoded and later re-encoded). Base64-encoding the
+    # file directly after reading it prevents this from happening.
+ data: "{{ lookup('file', '/path/to/secret/file') | b64encode }}"
+ data_is_b64: true
+ state: present
+
+- name: Change the secret data
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ state: present
+
+- name: Add a new label
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Adding a new label will cause a remove/create of the secret
+ two: '2'
+ state: present
+
+- name: No change
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: baz
+ one: '1'
+ # Even though 'two' is missing, there is no change to the existing secret
+ state: present
+
+- name: Update an existing label
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ labels:
+ bar: monkey # Changing a label will cause a remove/create of the secret
+ one: '1'
+ state: present
+
+- name: Force the removal/creation of the secret
+ community.docker.docker_secret:
+ name: foo
+ data: Goodnight everyone!
+ force: yes
+ state: present
+
+- name: Remove secret foo
+ community.docker.docker_secret:
+ name: foo
+ state: absent
+'''
+
+RETURN = '''
+secret_id:
+ description:
+ - The ID assigned by Docker to the secret object.
+ returned: success and I(state) is C(present)
+ type: str
+ sample: 'hzehrmyjigmcp2gb6nlhmjqcv'
+'''
+
+import base64
+import hashlib
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ DockerBaseClass,
+ compare_generic,
+ RequestException,
+)
+from ansible.module_utils._text import to_native, to_bytes
+
+
+class SecretManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SecretManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+
+ parameters = self.client.module.params
+ self.name = parameters.get('name')
+ self.state = parameters.get('state')
+ self.data = parameters.get('data')
+ if self.data is not None:
+ if parameters.get('data_is_b64'):
+ self.data = base64.b64decode(self.data)
+ else:
+ self.data = to_bytes(self.data)
+ self.labels = parameters.get('labels')
+ self.force = parameters.get('force')
+ self.data_key = None
+
+ def __call__(self):
+ if self.state == 'present':
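+            # Hash the (decoded) secret data with SHA-224; the digest is stored
+            # as the 'ansible_key' label and used for later idempotency checks.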
+ self.data_key = hashlib.sha224(self.data).hexdigest()
+ self.present()
+ elif self.state == 'absent':
+ self.absent()
+
+ def get_secret(self):
+ ''' Find an existing secret. '''
+ try:
+ secrets = self.client.secrets(filters={'name': self.name})
+ except APIError as exc:
+ self.client.fail("Error accessing secret %s: %s" % (self.name, to_native(exc)))
+
+ for secret in secrets:
+ if secret['Spec']['Name'] == self.name:
+ return secret
+ return None
+
+ def create_secret(self):
+ ''' Create a new secret '''
+ secret_id = None
+ # We can't see the data after creation, so adding a label we can use for idempotency check
+ labels = {
+ 'ansible_key': self.data_key
+ }
+ if self.labels:
+ labels.update(self.labels)
+
+ try:
+ if not self.check_mode:
+ secret_id = self.client.create_secret(self.name, self.data, labels=labels)
+ except APIError as exc:
+ self.client.fail("Error creating secret: %s" % to_native(exc))
+
+ if isinstance(secret_id, dict):
+ secret_id = secret_id['ID']
+
+ return secret_id
+
+ def present(self):
+ ''' Handles state == 'present', creating or updating the secret '''
+ secret = self.get_secret()
+ if secret:
+ self.results['secret_id'] = secret['ID']
+ data_changed = False
+ attrs = secret.get('Spec', {})
+ if attrs.get('Labels', {}).get('ansible_key'):
+ if attrs['Labels']['ansible_key'] != self.data_key:
+ data_changed = True
+ else:
+ if not self.force:
+ self.client.module.warn("'ansible_key' label not found. Secret will not be changed unless the force parameter is set to 'yes'")
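+            # 'allow_more_present' means labels that exist on the secret but not
+            # in the task parameters do not count as a change.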
+ labels_changed = not compare_generic(self.labels, attrs.get('Labels'), 'allow_more_present', 'dict')
+ if data_changed or labels_changed or self.force:
+ # if something changed or force, delete and re-create the secret
+ self.absent()
+ secret_id = self.create_secret()
+ self.results['changed'] = True
+ self.results['secret_id'] = secret_id
+ else:
+ self.results['changed'] = True
+ self.results['secret_id'] = self.create_secret()
+
+ def absent(self):
+ ''' Handles state == 'absent', removing the secret '''
+ secret = self.get_secret()
+ if secret:
+ try:
+ if not self.check_mode:
+ self.client.remove_secret(secret['ID'])
+ except APIError as exc:
+ self.client.fail("Error removing secret %s: %s" % (self.name, to_native(exc)))
+ self.results['changed'] = True
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ state=dict(type='str', default='present', choices=['absent', 'present']),
+ data=dict(type='str', no_log=True),
+ data_is_b64=dict(type='bool', default=False),
+ labels=dict(type='dict'),
+ force=dict(type='bool', default=False)
+ )
+
+ required_if = [
+ ('state', 'present', ['data'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='2.1.0',
+ min_docker_api_version='1.25',
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ secret_id=''
+ )
+
+ SecretManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack.py
new file mode 100644
index 00000000..9688b6fd
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2018 Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_stack
+author: "Dario Zanzico (@dariko)"
+short_description: Manage docker stacks
+description:
+  - Manage docker stacks using the C(docker stack) command
+    on the target node (see examples).
+options:
+ name:
+ description:
+      - Stack name.
+ type: str
+ required: yes
+ state:
+ description:
+ - Service state.
+ type: str
+ default: "present"
+ choices:
+ - present
+ - absent
+ compose:
+ description:
+      - List of compose definitions. Any element may be a string
+        referring to the path of the compose file on the target host
+        or the YAML contents of a compose file nested as a dictionary.
+ type: list
+ elements: raw
+ default: []
+ prune:
+ description:
+      - If C(true), will add the C(--prune) option to the C(docker stack deploy) command.
+ This will have docker remove the services not present in the
+ current stack definition.
+ type: bool
+ default: no
+ with_registry_auth:
+ description:
+      - If C(true), will add the C(--with-registry-auth) option to the C(docker stack deploy) command.
+ This will have docker send registry authentication details to Swarm agents.
+ type: bool
+ default: no
+ resolve_image:
+ description:
+      - If set, will add the C(--resolve-image) option to the C(docker stack deploy) command.
+        This will have docker query the registry to resolve image digests and
+        supported platforms. If not set, docker uses C(always) by default.
+ type: str
+ choices: ["always", "changed", "never"]
+ absent_retries:
+ description:
+ - If C(>0) and I(state) is C(absent) the module will retry up to
+ I(absent_retries) times to delete the stack until all the
+ resources have been effectively deleted.
+ If the last try still reports the stack as not completely
+ removed the module will fail.
+ type: int
+ default: 0
+ absent_retries_interval:
+ description:
+ - Interval in seconds between consecutive I(absent_retries).
+ type: int
+ default: 1
+
+requirements:
+ - jsondiff
+ - pyyaml
+
+notes:
+ - Return values I(out) and I(err) have been deprecated and will be removed in community.docker 2.0.0. Use I(stdout) and I(stderr) instead.
+'''
+
+RETURN = '''
+stack_spec_diff:
+ description: |
+    Dictionary containing the differences between the 'Spec' field
+ of the stack services before and after applying the new stack
+ definition.
+ sample: >
+ "stack_spec_diff":
+ {'test_stack_test_service': {u'TaskTemplate': {u'ContainerSpec': {delete: [u'Env']}}}}
+ returned: on change
+ type: dict
+'''
+
+EXAMPLES = '''
+ - name: Deploy stack from a compose file
+ community.docker.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+
+ - name: Deploy stack from base compose file and override the web service
+ community.docker.docker_stack:
+ state: present
+ name: mystack
+ compose:
+ - /opt/docker-compose.yml
+ - version: '3'
+ services:
+ web:
+ image: nginx:latest
+ environment:
+ ENVVAR: envvar
+
+ - name: Remove stack
+ community.docker.docker_stack:
+ name: mystack
+ state: absent
+'''
+
+
+import json
+import tempfile
+from ansible.module_utils.six import string_types
+from time import sleep
+
+try:
+ from jsondiff import diff as json_diff
+ HAS_JSONDIFF = True
+except ImportError:
+ HAS_JSONDIFF = False
+
+try:
+ from yaml import dump as yaml_dump
+ HAS_YAML = True
+except ImportError:
+ HAS_YAML = False
+
+from ansible.module_utils.basic import AnsibleModule, os
+
+
+def docker_stack_services(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "stack",
+ "services",
+ stack_name,
+ "--format",
+ "{{.Name}}"])
+ if err == "Nothing found in stack: %s\n" % stack_name:
+ return []
+ return out.strip().split('\n')
+
+
+def docker_service_inspect(module, service_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command([docker_bin,
+ "service",
+ "inspect",
+ service_name])
+ if rc != 0:
+ return None
+ else:
+ ret = json.loads(out)[0]['Spec']
+ return ret
+
+
+def docker_stack_deploy(module, stack_name, compose_files):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "deploy"]
+ if module.params["prune"]:
+ command += ["--prune"]
+ if module.params["with_registry_auth"]:
+ command += ["--with-registry-auth"]
+ if module.params["resolve_image"]:
+ command += ["--resolve-image",
+ module.params["resolve_image"]]
+ for compose_file in compose_files:
+ command += ["--compose-file",
+ compose_file]
+ command += [stack_name]
+ return module.run_command(command)
+
+
+def docker_stack_inspect(module, stack_name):
+ ret = {}
+ for service_name in docker_stack_services(module, stack_name):
+ ret[service_name] = docker_service_inspect(module, service_name)
+ return ret
+
+
+def docker_stack_rm(module, stack_name, retries, interval):
+ docker_bin = module.get_bin_path('docker', required=True)
+ command = [docker_bin, "stack", "rm", stack_name]
+
+ rc, out, err = module.run_command(command)
+
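+    # 'docker stack rm' may return before all resources are gone; re-run it
+    # until docker reports nothing left in the stack or retries are exhausted.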
+ while err != "Nothing found in stack: %s\n" % stack_name and retries > 0:
+ sleep(interval)
+ retries = retries - 1
+ rc, out, err = module.run_command(command)
+ return rc, out, err
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True),
+ 'compose': dict(type='list', elements='raw', default=[]),
+ 'prune': dict(type='bool', default=False),
+ 'with_registry_auth': dict(type='bool', default=False),
+ 'resolve_image': dict(type='str', choices=['always', 'changed', 'never']),
+ 'state': dict(type='str', default='present', choices=['present', 'absent']),
+ 'absent_retries': dict(type='int', default=0),
+ 'absent_retries_interval': dict(type='int', default=1)
+ },
+ supports_check_mode=False
+ )
+
+ if not HAS_JSONDIFF:
+ return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'")
+
+ if not HAS_YAML:
+ return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'")
+
+ state = module.params['state']
+ compose = module.params['compose']
+ name = module.params['name']
+ absent_retries = module.params['absent_retries']
+ absent_retries_interval = module.params['absent_retries_interval']
+
+ if state == 'present':
+ if not compose:
+ module.fail_json(msg=("compose parameter must be a list "
+ "containing at least one element"))
+
+ compose_files = []
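+        # Dictionary compose definitions are written to temporary YAML files so
+        # they can be passed to 'docker stack deploy --compose-file'.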
+ for i, compose_def in enumerate(compose):
+ if isinstance(compose_def, dict):
+ compose_file_fd, compose_file = tempfile.mkstemp()
+ module.add_cleanup_file(compose_file)
+ with os.fdopen(compose_file_fd, 'w') as stack_file:
+ compose_files.append(compose_file)
+ stack_file.write(yaml_dump(compose_def))
+ elif isinstance(compose_def, string_types):
+ compose_files.append(compose_def)
+ else:
+ module.fail_json(msg="compose element '%s' must be a " +
+ "string or a dictionary" % compose_def)
+
+ before_stack_services = docker_stack_inspect(module, name)
+
+ rc, out, err = docker_stack_deploy(module, name, compose_files)
+
+ after_stack_services = docker_stack_inspect(module, name)
+
+ if rc != 0:
+ module.fail_json(msg="docker stack up deploy command failed",
+ rc=rc,
+ out=out, err=err, # Deprecated
+ stdout=out, stderr=err)
+
+ before_after_differences = json_diff(before_stack_services,
+ after_stack_services)
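+        # Drop fields that change on every deploy (UpdatedAt, Version) so they
+        # do not show up as spurious differences.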
+ for k in before_after_differences.keys():
+ if isinstance(before_after_differences[k], dict):
+ before_after_differences[k].pop('UpdatedAt', None)
+ before_after_differences[k].pop('Version', None)
+ if not list(before_after_differences[k].keys()):
+ before_after_differences.pop(k)
+
+ if not before_after_differences:
+ module.exit_json(
+ changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err)
+ else:
+ module.exit_json(
+ changed=True,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ stack_spec_diff=json_diff(before_stack_services,
+ after_stack_services,
+ dump=True))
+
+ else:
+ if docker_stack_services(module, name):
+ rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval)
+ if rc != 0:
+ module.fail_json(msg="'docker stack down' command failed",
+ rc=rc,
+ out=out, err=err, # Deprecated
+ stdout=out, stderr=err)
+ else:
+ module.exit_json(changed=True,
+ msg=out, rc=rc,
+ err=err, # Deprecated
+ stdout=out, stderr=err)
+ module.exit_json(changed=False)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_info.py
new file mode 100644
index 00000000..32f59ac5
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_info.py
@@ -0,0 +1,83 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information on a docker stack
+description:
+ - Retrieve information on docker stacks using the C(docker stack) command
+ on the target node (see examples).
+'''
+
+RETURN = '''
+results:
+ description: |
+    List of dictionaries containing information on each stack found
+    on the target node.
+ sample: >
+ "results": [{"name":"grafana","namespace":"default","orchestrator":"Kubernetes","services":"2"}]
+ returned: always
+ type: list
+'''
+
+EXAMPLES = '''
+  - name: Show stack info
+ community.docker.docker_stack_info:
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def docker_stack_list(module):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ls", "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ },
+ supports_check_mode=False
+ )
+
+ rc, out, err = docker_stack_list(module)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
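+            # 'docker stack ls --format={{json .}}' prints one JSON object per
+            # line; parse each line separately.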
+ ret = list(
+ json.loads(outitem)
+ for outitem in out.splitlines())
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py
new file mode 100644
index 00000000..db6f03f4
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py
@@ -0,0 +1,94 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+
+# Copyright (c) 2020 Jose Angel Munoz (@imjoseangel)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_stack_task_info
+author: "Jose Angel Munoz (@imjoseangel)"
+short_description: Return information on the tasks of a docker stack
+description:
+  - Retrieve information on the tasks of a docker stack using the C(docker stack) command
+    on the target node (see examples).
+options:
+ name:
+ description:
+ - Stack name.
+ type: str
+ required: yes
+'''
+
+RETURN = '''
+results:
+ description: |
+    List of dictionaries containing the list of tasks associated
+    with a stack name.
+ sample: >
+ [{"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""}]
+ returned: always
+ type: list
+ elements: dict
+'''
+
+EXAMPLES = '''
+  - name: Show stack task info
+ community.docker.docker_stack_task_info:
+ name: test_stack
+ register: result
+
+ - name: Show results
+ ansible.builtin.debug:
+ var: result.results
+'''
+
+import json
+from ansible.module_utils.basic import AnsibleModule
+
+
+def docker_stack_task(module, stack_name):
+ docker_bin = module.get_bin_path('docker', required=True)
+ rc, out, err = module.run_command(
+ [docker_bin, "stack", "ps", stack_name, "--format={{json .}}"])
+
+ return rc, out.strip(), err.strip()
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec={
+ 'name': dict(type='str', required=True)
+ },
+ supports_check_mode=False
+ )
+
+ name = module.params['name']
+
+ rc, out, err = docker_stack_task(module, name)
+
+ if rc != 0:
+ module.fail_json(msg="Error running docker stack. {0}".format(err),
+ rc=rc, stdout=out, stderr=err)
+ else:
+ if out:
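+            # Like 'docker stack ls', 'docker stack ps --format={{json .}}'
+            # prints one JSON object per line; parse each line separately.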
+ ret = list(
+ json.loads(outitem)
+ for outitem in out.splitlines())
+
+ else:
+ ret = []
+
+ module.exit_json(changed=False,
+ rc=rc,
+ stdout=out,
+ stderr=err,
+ results=ret)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm.py
new file mode 100644
index 00000000..5cd2b91b
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm.py
@@ -0,0 +1,662 @@
+#!/usr/bin/python
+
+# Copyright 2016 Red Hat | Ansible
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm
+short_description: Manage Swarm cluster
+description:
+ - Create a new Swarm cluster.
+ - Add/Remove nodes or managers to an existing cluster.
+options:
+ advertise_addr:
+ description:
+ - Externally reachable address advertised to other nodes.
+ - This can either be an address/port combination
+ in the form C(192.168.1.1:4567), or an interface followed by a
+ port number, like C(eth0:4567).
+ - If the port number is omitted,
+ the port number from the listen address is used.
+ - If I(advertise_addr) is not specified, it will be automatically
+ detected when possible.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default_addr_pool:
+ description:
+ - Default address pool in CIDR format.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: list
+ elements: str
+ subnet_size:
+ description:
+ - Default address pool subnet mask length.
+ - Only used when swarm is initialised. Because of this it's not considered
+ for idempotency checking.
+ - Requires API version >= 1.39.
+ type: int
+ listen_addr:
+ description:
+ - Listen address used for inter-manager communication.
+ - This can either be an address/port combination in the form
+ C(192.168.1.1:4567), or an interface followed by a port number,
+ like C(eth0:4567).
+ - If the port number is omitted, the default swarm listening port
+ is used.
+ - Only used when swarm is initialised or joined. Because of this it's not
+ considered for idempotency checking.
+ type: str
+ default: 0.0.0.0:2377
+ force:
+ description:
+ - Use with state C(present) to force creating a new Swarm, even if already part of one.
+      - Use with state C(absent) to leave the swarm even if this node is a manager.
+ type: bool
+ default: no
+ state:
+ description:
+      - Set to C(present), to create a new cluster or update an existing one.
+ - Set to C(join), to join an existing cluster.
+ - Set to C(absent), to leave an existing cluster.
+ - Set to C(remove), to remove an absent node from the cluster.
+ Note that removing requires Docker SDK for Python >= 2.4.0.
+ type: str
+ default: present
+ choices:
+ - present
+ - join
+ - absent
+ - remove
+ node_id:
+ description:
+ - Swarm id of the node to remove.
+ - Used with I(state=remove).
+ type: str
+ join_token:
+ description:
+ - Swarm token used to join a swarm cluster.
+ - Used with I(state=join).
+ type: str
+ remote_addrs:
+ description:
+      - Remote addresses of one or more manager nodes of an existing Swarm to connect to.
+ - Used with I(state=join).
+ type: list
+ elements: str
+ task_history_retention_limit:
+ description:
+      - Maximum number of task history entries stored.
+ - Docker default value is C(5).
+ type: int
+ snapshot_interval:
+ description:
+      - Number of log entries between snapshots.
+ - Docker default value is C(10000).
+ type: int
+ keep_old_snapshots:
+ description:
+ - Number of snapshots to keep beyond the current snapshot.
+ - Docker default value is C(0).
+ type: int
+ log_entries_for_slow_followers:
+ description:
+ - Number of log entries to keep around to sync up slow followers after a snapshot is created.
+ type: int
+ heartbeat_tick:
+ description:
+      - Number of ticks (in seconds) between each heartbeat.
+ - Docker default value is C(1s).
+ type: int
+ election_tick:
+ description:
+      - Number of ticks (in seconds) needed without a leader to trigger a new election.
+ - Docker default value is C(10s).
+ type: int
+ dispatcher_heartbeat_period:
+ description:
+ - The delay for an agent to send a heartbeat to the dispatcher.
+ - Docker default value is C(5s).
+ type: int
+ node_cert_expiry:
+ description:
+      - Automatic expiry for node certificates.
+ - Docker default value is C(3months).
+ type: int
+ name:
+ description:
+ - The name of the swarm.
+ type: str
+ labels:
+ description:
+ - User-defined key/value metadata.
+ - Label operations in this module apply to the docker swarm cluster.
+ Use M(community.docker.docker_node) module to add/modify/remove swarm node labels.
+ - Requires API version >= 1.32.
+ type: dict
+ signing_ca_cert:
+ description:
+ - The desired signing CA certificate for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a certificate, but the contents of the certificate.
+ - Requires API version >= 1.30.
+ type: str
+ signing_ca_key:
+ description:
+ - The desired signing CA key for all swarm node TLS leaf certificates, in PEM format.
+ - This must not be a path to a key, but the contents of the key.
+ - Requires API version >= 1.30.
+ type: str
+ ca_force_rotate:
+ description:
+ - An integer whose purpose is to force swarm to generate a new signing CA certificate and key,
+ if none have been specified.
+ - Docker default value is C(0).
+ - Requires API version >= 1.30.
+ type: int
+ autolock_managers:
+ description:
+ - If set, generate a key and use it to lock data stored on the managers.
+ - Docker default value is C(no).
+ - M(community.docker.docker_swarm_info) can be used to retrieve the unlock key.
+ type: bool
+ rotate_worker_token:
+ description: Rotate the worker join token.
+ type: bool
+ default: no
+ rotate_manager_token:
+ description: Rotate the manager join token.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - Docker API >= 1.25
+author:
+ - Thierry Bouvet (@tbouvet)
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+'''
+
+EXAMPLES = '''
+
+- name: Init a new swarm with default parameters
+ community.docker.docker_swarm:
+ state: present
+
+- name: Update swarm configuration
+ community.docker.docker_swarm:
+ state: present
+ election_tick: 5
+
+- name: Add nodes
+ community.docker.docker_swarm:
+ state: join
+ advertise_addr: 192.168.1.2
+ join_token: SWMTKN-1--xxxxx
+ remote_addrs: [ '192.168.1.1:2377' ]
+
+- name: Leave swarm for a node
+ community.docker.docker_swarm:
+ state: absent
+
+- name: Remove a swarm manager
+ community.docker.docker_swarm:
+ state: absent
+ force: true
+
+- name: Remove node from swarm
+ community.docker.docker_swarm:
+ state: remove
+ node_id: mynode
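+
+# Illustrative (not part of the module's shipped examples): enable manager
+# auto-locking; the unlock key can then be retrieved with
+# community.docker.docker_swarm_info.
+- name: Enable autolock on an existing swarm
+  community.docker.docker_swarm:
+    state: present
+    autolock_managers: true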
+'''
+
+RETURN = '''
+swarm_facts:
+  description: Information about the swarm.
+ returned: success
+ type: dict
+ contains:
+ JoinTokens:
+ description: Tokens to connect to the Swarm.
+ returned: success
+ type: dict
+ contains:
+ Worker:
+          description: Token to create a new *worker* node.
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ Manager:
+          description: Token to create a new *manager* node.
+ returned: success
+ type: str
+ example: SWMTKN-1--xxxxx
+ UnlockKey:
+ description: The swarm unlock-key if I(autolock_managers) is C(true).
+ returned: on success if I(autolock_managers) is C(true)
+ and swarm is initialised, or if I(autolock_managers) has changed.
+ type: str
+ example: SWMKEY-1-xxx
+
+actions:
+ description: Provides the actions done on the swarm.
+  returned: when an action failed.
+ type: list
+ elements: str
+ example: "['This cluster is already a swarm cluster']"
+
+'''
+
+import json
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ DockerBaseClass,
+ DifferenceTracker,
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+from ansible.module_utils._text import to_native
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self):
+ super(TaskParameters, self).__init__()
+
+ self.advertise_addr = None
+ self.listen_addr = None
+ self.remote_addrs = None
+ self.join_token = None
+
+ # Spec
+ self.snapshot_interval = None
+ self.task_history_retention_limit = None
+ self.keep_old_snapshots = None
+ self.log_entries_for_slow_followers = None
+ self.heartbeat_tick = None
+ self.election_tick = None
+ self.dispatcher_heartbeat_period = None
+ self.node_cert_expiry = None
+ self.name = None
+ self.labels = None
+ self.log_driver = None
+ self.signing_ca_cert = None
+ self.signing_ca_key = None
+ self.ca_force_rotate = None
+ self.autolock_managers = None
+ self.rotate_worker_token = None
+ self.rotate_manager_token = None
+ self.default_addr_pool = None
+ self.subnet_size = None
+
+ @staticmethod
+ def from_ansible_params(client):
+ result = TaskParameters()
+ for key, value in client.module.params.items():
+ if key in result.__dict__:
+ setattr(result, key, value)
+
+ result.update_parameters(client)
+ return result
+
+ def update_from_swarm_info(self, swarm_info):
+ spec = swarm_info['Spec']
+
+ ca_config = spec.get('CAConfig') or dict()
+ if self.node_cert_expiry is None:
+ self.node_cert_expiry = ca_config.get('NodeCertExpiry')
+ if self.ca_force_rotate is None:
+ self.ca_force_rotate = ca_config.get('ForceRotate')
+
+ dispatcher = spec.get('Dispatcher') or dict()
+ if self.dispatcher_heartbeat_period is None:
+ self.dispatcher_heartbeat_period = dispatcher.get('HeartbeatPeriod')
+
+ raft = spec.get('Raft') or dict()
+ if self.snapshot_interval is None:
+ self.snapshot_interval = raft.get('SnapshotInterval')
+ if self.keep_old_snapshots is None:
+ self.keep_old_snapshots = raft.get('KeepOldSnapshots')
+ if self.heartbeat_tick is None:
+ self.heartbeat_tick = raft.get('HeartbeatTick')
+ if self.log_entries_for_slow_followers is None:
+ self.log_entries_for_slow_followers = raft.get('LogEntriesForSlowFollowers')
+ if self.election_tick is None:
+ self.election_tick = raft.get('ElectionTick')
+
+ orchestration = spec.get('Orchestration') or dict()
+ if self.task_history_retention_limit is None:
+ self.task_history_retention_limit = orchestration.get('TaskHistoryRetentionLimit')
+
+ encryption_config = spec.get('EncryptionConfig') or dict()
+ if self.autolock_managers is None:
+ self.autolock_managers = encryption_config.get('AutoLockManagers')
+
+ if self.name is None:
+ self.name = spec['Name']
+
+ if self.labels is None:
+ self.labels = spec.get('Labels') or {}
+
+ if 'LogDriver' in spec['TaskDefaults']:
+ self.log_driver = spec['TaskDefaults']['LogDriver']
+
+ def update_parameters(self, client):
+ assign = dict(
+ snapshot_interval='snapshot_interval',
+ task_history_retention_limit='task_history_retention_limit',
+ keep_old_snapshots='keep_old_snapshots',
+ log_entries_for_slow_followers='log_entries_for_slow_followers',
+ heartbeat_tick='heartbeat_tick',
+ election_tick='election_tick',
+ dispatcher_heartbeat_period='dispatcher_heartbeat_period',
+ node_cert_expiry='node_cert_expiry',
+ name='name',
+ labels='labels',
+ signing_ca_cert='signing_ca_cert',
+ signing_ca_key='signing_ca_key',
+ ca_force_rotate='ca_force_rotate',
+ autolock_managers='autolock_managers',
+ log_driver='log_driver',
+ )
+ params = dict()
+ for dest, source in assign.items():
+ if not client.option_minimal_versions[source]['supported']:
+ continue
+ value = getattr(self, source)
+ if value is not None:
+ params[dest] = value
+ self.spec = client.create_swarm_spec(**params)
+
+ def compare_to_active(self, other, client, differences):
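+        # Compare the requested parameters with the active swarm configuration.
+        # Options that only matter when initialising or joining (addresses,
+        # tokens, address pools) are skipped, as documented above.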
+ for k in self.__dict__:
+ if k in ('advertise_addr', 'listen_addr', 'remote_addrs', 'join_token',
+ 'rotate_worker_token', 'rotate_manager_token', 'spec',
+ 'default_addr_pool', 'subnet_size'):
+ continue
+ if not client.option_minimal_versions[k]['supported']:
+ continue
+ value = getattr(self, k)
+ if value is None:
+ continue
+ other_value = getattr(other, k)
+ if value != other_value:
+ differences.add(k, parameter=value, active=other_value)
+ if self.rotate_worker_token:
+ differences.add('rotate_worker_token', parameter=True, active=False)
+ if self.rotate_manager_token:
+ differences.add('rotate_manager_token', parameter=True, active=False)
+ return differences
+
+
+class SwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(SwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.check_mode = self.client.check_mode
+ self.swarm_info = {}
+
+ self.state = client.module.params['state']
+ self.force = client.module.params['force']
+ self.node_id = client.module.params['node_id']
+
+ self.differences = DifferenceTracker()
+ self.parameters = TaskParameters.from_ansible_params(client)
+
+ self.created = False
+
+ def __call__(self):
+ choice_map = {
+ "present": self.init_swarm,
+ "join": self.join,
+ "absent": self.leave,
+ "remove": self.remove,
+ }
+
+ choice_map.get(self.state)()
+
+ if self.client.module._diff or self.parameters.debug:
+ diff = dict()
+ diff['before'], diff['after'] = self.differences.get_before_after()
+ self.results['diff'] = diff
+
+ def inspect_swarm(self):
+ try:
+ data = self.client.inspect_swarm()
+ json_str = json.dumps(data, ensure_ascii=False)
+ self.swarm_info = json.loads(json_str)
+
+ self.results['changed'] = False
+ self.results['swarm_facts'] = self.swarm_info
+
+ unlock_key = self.get_unlock_key()
+ self.swarm_info.update(unlock_key)
+ except APIError:
+ return
+
+ def get_unlock_key(self):
+ default = {'UnlockKey': None}
+ if not self.has_swarm_lock_changed():
+ return default
+ try:
+ return self.client.get_unlock_key() or default
+ except APIError:
+ return default
+
+ def has_swarm_lock_changed(self):
+ return self.parameters.autolock_managers and (
+ self.created or self.differences.has_difference_for('autolock_managers')
+ )
+
+ def init_swarm(self):
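+        # Already a swarm manager and no force: treat 'present' as an update of
+        # the existing cluster instead of creating a new one.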
+ if not self.force and self.client.check_if_swarm_manager():
+ self.__update_swarm()
+ return
+
+ if not self.check_mode:
+ init_arguments = {
+ 'advertise_addr': self.parameters.advertise_addr,
+ 'listen_addr': self.parameters.listen_addr,
+ 'force_new_cluster': self.force,
+ 'swarm_spec': self.parameters.spec,
+ }
+ if self.parameters.default_addr_pool is not None:
+ init_arguments['default_addr_pool'] = self.parameters.default_addr_pool
+ if self.parameters.subnet_size is not None:
+ init_arguments['subnet_size'] = self.parameters.subnet_size
+ try:
+ self.client.init_swarm(**init_arguments)
+ except APIError as exc:
+ self.client.fail("Can not create a new Swarm Cluster: %s" % to_native(exc))
+
+ if not self.client.check_if_swarm_manager():
+ if not self.check_mode:
+ self.client.fail("Swarm not created or other error!")
+
+ self.created = True
+ self.inspect_swarm()
+ self.results['actions'].append("New Swarm cluster created: %s" % (self.swarm_info.get('ID')))
+ self.differences.add('state', parameter='present', active='absent')
+ self.results['changed'] = True
+ self.results['swarm_facts'] = {
+ 'JoinTokens': self.swarm_info.get('JoinTokens'),
+ 'UnlockKey': self.swarm_info.get('UnlockKey')
+ }
+
+ def __update_swarm(self):
+ try:
+ self.inspect_swarm()
+ version = self.swarm_info['Version']['Index']
+ self.parameters.update_from_swarm_info(self.swarm_info)
+ old_parameters = TaskParameters()
+ old_parameters.update_from_swarm_info(self.swarm_info)
+ self.parameters.compare_to_active(old_parameters, self.client, self.differences)
+ if self.differences.empty:
+ self.results['actions'].append("No modification")
+ self.results['changed'] = False
+ return
+ update_parameters = TaskParameters.from_ansible_params(self.client)
+ update_parameters.update_parameters(self.client)
+ if not self.check_mode:
+ self.client.update_swarm(
+ version=version, swarm_spec=update_parameters.spec,
+ rotate_worker_token=self.parameters.rotate_worker_token,
+ rotate_manager_token=self.parameters.rotate_manager_token)
+ except APIError as exc:
+ self.client.fail("Can not update a Swarm Cluster: %s" % to_native(exc))
+ return
+
+ self.inspect_swarm()
+ self.results['actions'].append("Swarm cluster updated")
+ self.results['changed'] = True
+
+ def join(self):
+ if self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is already part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.join_swarm(
+ remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token,
+ listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr)
+ except APIError as exc:
+ self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("New node is added to swarm cluster")
+ self.differences.add('joined', parameter=True, active=False)
+ self.results['changed'] = True
+
+ def leave(self):
+ if not self.client.check_if_swarm_node():
+ self.results['actions'].append("This node is not part of a swarm.")
+ return
+ if not self.check_mode:
+ try:
+ self.client.leave_swarm(force=self.force)
+ except APIError as exc:
+ self.client.fail("This node can not leave the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node has left the swarm cluster")
+ self.differences.add('joined', parameter='absent', active='present')
+ self.results['changed'] = True
+
+ def remove(self):
+ if not self.client.check_if_swarm_manager():
+ self.client.fail("This node is not a manager.")
+
+ try:
+ status_down = self.client.check_if_swarm_node_is_down(node_id=self.node_id, repeat_check=5)
+ except APIError:
+ return
+
+ if not status_down:
+ self.client.fail("Can not remove the node. The status node is ready and not down.")
+
+ if not self.check_mode:
+ try:
+ self.client.remove_node(node_id=self.node_id, force=self.force)
+ except APIError as exc:
+ self.client.fail("Can not remove the node from the Swarm Cluster: %s" % to_native(exc))
+ self.results['actions'].append("Node is removed from swarm cluster.")
+ self.differences.add('joined', parameter=False, active=True)
+ self.results['changed'] = True
+
+
+def _detect_remove_operation(client):
+ return client.module.params['state'] == 'remove'
+
+
+def main():
+ argument_spec = dict(
+ advertise_addr=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'join', 'absent', 'remove']),
+ force=dict(type='bool', default=False),
+ listen_addr=dict(type='str', default='0.0.0.0:2377'),
+ remote_addrs=dict(type='list', elements='str'),
+ join_token=dict(type='str'),
+ snapshot_interval=dict(type='int'),
+ task_history_retention_limit=dict(type='int'),
+ keep_old_snapshots=dict(type='int'),
+ log_entries_for_slow_followers=dict(type='int'),
+ heartbeat_tick=dict(type='int'),
+ election_tick=dict(type='int'),
+ dispatcher_heartbeat_period=dict(type='int'),
+ node_cert_expiry=dict(type='int'),
+ name=dict(type='str'),
+ labels=dict(type='dict'),
+ signing_ca_cert=dict(type='str'),
+ signing_ca_key=dict(type='str', no_log=True),
+ ca_force_rotate=dict(type='int'),
+ autolock_managers=dict(type='bool'),
+ node_id=dict(type='str'),
+ rotate_worker_token=dict(type='bool', default=False),
+ rotate_manager_token=dict(type='bool', default=False),
+ default_addr_pool=dict(type='list', elements='str'),
+ subnet_size=dict(type='int'),
+ )
+
+ required_if = [
+ ('state', 'join', ['remote_addrs', 'join_token']),
+ ('state', 'remove', ['node_id'])
+ ]
+
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='2.6.0', docker_api_version='1.32'),
+ signing_ca_cert=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ signing_ca_key=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ ca_force_rotate=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ autolock_managers=dict(docker_py_version='2.6.0'),
+ log_driver=dict(docker_py_version='2.6.0'),
+ remove_operation=dict(
+ docker_py_version='2.4.0',
+ detect_usage=_detect_remove_operation,
+ usage_msg='remove swarm nodes'
+ ),
+ default_addr_pool=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ subnet_size=dict(docker_py_version='4.0.0', docker_api_version='1.39'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ required_if=required_if,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.25',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ results = dict(
+ changed=False,
+ result='',
+ actions=[]
+ )
+
+ SwarmManager(client, results)()
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py
new file mode 100644
index 00000000..095bf7fe
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py
@@ -0,0 +1,384 @@
+#!/usr/bin/python
+#
+# (c) 2019 Piotr Wojciechowski <piotr@it-playground.pl>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_info
+
+short_description: Retrieves facts about a Docker Swarm cluster
+
+description:
+ - Retrieves facts about a Docker Swarm.
+  - Returns lists of the names of swarm objects - nodes, services, and tasks.
+  - The output differs depending on the API version available on the docker host.
+  - Must be run on a Swarm Manager node; otherwise the module fails with an error message.
+    It does return boolean flags on both error and success, indicating whether
+    the docker daemon can be communicated with, whether it is in Swarm mode, and
+    whether it is a Swarm Manager node.
+
+
+author:
+ - Piotr Wojciechowski (@WojciechowskiPiotr)
+
+options:
+ nodes:
+ description:
+ - Whether to list swarm nodes.
+ type: bool
+ default: no
+ nodes_filters:
+ description:
+ - A dictionary of filter values used for selecting nodes to list.
+ - "For example, C(name: mynode)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/node_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ services:
+ description:
+ - Whether to list swarm services.
+ type: bool
+ default: no
+ services_filters:
+ description:
+ - A dictionary of filter values used for selecting services to list.
+ - "For example, C(name: myservice)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ls/#filtering)
+ for more information on possible filters.
+ type: dict
+ tasks:
+ description:
+      - Whether to list swarm tasks.
+ type: bool
+ default: no
+ tasks_filters:
+ description:
+ - A dictionary of filter values used for selecting tasks to list.
+ - "For example, C(node: mynode-1)."
+ - See L(the docker documentation,https://docs.docker.com/engine/reference/commandline/service_ps/#filtering)
+ for more information on possible filters.
+ type: dict
+ unlock_key:
+ description:
+ - Whether to retrieve the swarm unlock key.
+ type: bool
+ default: no
+ verbose_output:
+ description:
+      - When set to C(yes) and I(nodes), I(services), or I(tasks) is set to C(yes), the module output will
+        contain verbose information about objects matching the full output of the corresponding API method.
+      - For details see the documentation of your version of the Docker API at U(https://docs.docker.com/engine/api/).
+      - The verbose output of this module contains only a subset of the information the corresponding
+        I(_info) module returns for each type of object.
+ type: bool
+ default: no
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info on Docker Swarm
+ community.docker.docker_swarm_info:
+ ignore_errors: yes
+ register: result
+
+- name: Inform about basic flags
+ ansible.builtin.debug:
+ msg: |
+ Was able to talk to docker daemon: {{ result.can_talk_to_docker }}
+ Docker in Swarm mode: {{ result.docker_swarm_active }}
+ This is a Manager node: {{ result.docker_swarm_manager }}
+
+- name: Get info on Docker Swarm and list of registered nodes
+ community.docker.docker_swarm_info:
+ nodes: yes
+ register: result
+
+- name: Get info on Docker Swarm and extended list of registered nodes
+ community.docker.docker_swarm_info:
+ nodes: yes
+ verbose_output: yes
+ register: result
+
+- name: Get info on Docker Swarm and filtered list of registered nodes
+ community.docker.docker_swarm_info:
+ nodes: yes
+ nodes_filters:
+ name: mynode
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_facts
+
+- name: Get the swarm unlock key
+ community.docker.docker_swarm_info:
+ unlock_key: yes
+ register: result
+
+- ansible.builtin.debug:
+ var: result.swarm_unlock_key
+
+'''
+
+RETURN = '''
+can_talk_to_docker:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon.
+ returned: both on success and on error
+ type: bool
+docker_swarm_active:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ and the docker daemon is in Swarm mode.
+ returned: both on success and on error
+ type: bool
+docker_swarm_manager:
+ description:
+ - Will be C(true) if the module can talk to the docker daemon,
+ the docker daemon is in Swarm mode, and the current node is
+ a manager node.
+ - Only if this one is C(true), the module will not fail.
+ returned: both on success and on error
+ type: bool
+swarm_facts:
+ description:
+ - Facts representing the basic state of the docker Swarm cluster.
+ - Contains tokens to connect to the Swarm
+ returned: always
+ type: dict
+swarm_unlock_key:
+ description:
+ - Contains the key needed to unlock the swarm.
+ returned: When I(unlock_key) is C(true).
+ type: str
+nodes:
+ description:
+    - List of dict objects containing the basic information about each node.
+      Keys match the C(docker node ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(nodes) is C(yes)
+ type: list
+ elements: dict
+services:
+ description:
+    - List of dict objects containing the basic information about each service.
+      Keys match the C(docker service ls) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(services) is C(yes)
+ type: list
+ elements: dict
+tasks:
+ description:
+    - List of dict objects containing the basic information about each task.
+      Keys match the C(docker service ps) output unless I(verbose_output=yes).
+ See description for I(verbose_output).
+ returned: When I(tasks) is C(yes)
+ type: list
+ elements: dict
+
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible.module_utils.docker_common
+ pass
+
+from ansible.module_utils._text import to_native
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ DockerBaseClass,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+
+class DockerSwarmManager(DockerBaseClass):
+
+ def __init__(self, client, results):
+
+ super(DockerSwarmManager, self).__init__()
+
+ self.client = client
+ self.results = results
+ self.verbose_output = self.client.module.params['verbose_output']
+
+ listed_objects = ['tasks', 'services', 'nodes']
+
+ self.client.fail_task_if_not_swarm_manager()
+
+ self.results['swarm_facts'] = self.get_docker_swarm_facts()
+
+ for docker_object in listed_objects:
+ if self.client.module.params[docker_object]:
+ returned_name = docker_object
+ filter_name = docker_object + "_filters"
+ filters = clean_dict_booleans_for_docker_api(client.module.params.get(filter_name))
+ self.results[returned_name] = self.get_docker_items_list(docker_object, filters)
+ if self.client.module.params['unlock_key']:
+ self.results['swarm_unlock_key'] = self.get_docker_swarm_unlock_key()
+
+ def get_docker_swarm_facts(self):
+ try:
+ return self.client.inspect_swarm()
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm: %s" % to_native(exc))
+
+ def get_docker_items_list(self, docker_object=None, filters=None):
+ items = None
+ items_list = []
+
+ try:
+ if docker_object == 'nodes':
+ items = self.client.nodes(filters=filters)
+ elif docker_object == 'tasks':
+ items = self.client.tasks(filters=filters)
+ elif docker_object == 'services':
+ items = self.client.services(filters=filters)
+ except APIError as exc:
+ self.client.fail("Error inspecting docker swarm for object '%s': %s" %
+ (docker_object, to_native(exc)))
+
+ if self.verbose_output:
+ return items
+
+ for item in items:
+ item_record = dict()
+
+ if docker_object == 'nodes':
+ item_record = self.get_essential_facts_nodes(item)
+ elif docker_object == 'tasks':
+ item_record = self.get_essential_facts_tasks(item)
+ elif docker_object == 'services':
+ item_record = self.get_essential_facts_services(item)
+ if item_record['Mode'] == 'Global':
+ item_record['Replicas'] = len(items)
+ items_list.append(item_record)
+
+ return items_list
+
+ @staticmethod
+ def get_essential_facts_nodes(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item.get('ID')
+ object_essentials['Hostname'] = item['Description']['Hostname']
+ object_essentials['Status'] = item['Status']['State']
+ object_essentials['Availability'] = item['Spec']['Availability']
+ if 'ManagerStatus' in item:
+ object_essentials['ManagerStatus'] = item['ManagerStatus']['Reachability']
+ if 'Leader' in item['ManagerStatus'] and item['ManagerStatus']['Leader'] is True:
+ object_essentials['ManagerStatus'] = "Leader"
+ else:
+ object_essentials['ManagerStatus'] = None
+ object_essentials['EngineVersion'] = item['Description']['Engine']['EngineVersion']
+
+ return object_essentials
+
+ def get_essential_facts_tasks(self, item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ # Returning container ID to not trigger another connection to host
+ # Container ID is sufficient to get extended info in other tasks
+ object_essentials['ContainerID'] = item['Status']['ContainerStatus']['ContainerID']
+ object_essentials['Image'] = item['Spec']['ContainerSpec']['Image']
+ object_essentials['Node'] = self.client.get_node_name_by_id(item['NodeID'])
+ object_essentials['DesiredState'] = item['DesiredState']
+ object_essentials['CurrentState'] = item['Status']['State']
+ if 'Err' in item['Status']:
+ object_essentials['Error'] = item['Status']['Err']
+ else:
+ object_essentials['Error'] = None
+
+ return object_essentials
+
+ @staticmethod
+ def get_essential_facts_services(item):
+ object_essentials = dict()
+
+ object_essentials['ID'] = item['ID']
+ object_essentials['Name'] = item['Spec']['Name']
+ if 'Replicated' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Replicated"
+ object_essentials['Replicas'] = item['Spec']['Mode']['Replicated']['Replicas']
+ elif 'Global' in item['Spec']['Mode']:
+ object_essentials['Mode'] = "Global"
+ # Number of replicas has to be updated in the calling method or may be left as None
+ object_essentials['Replicas'] = None
+ object_essentials['Image'] = item['Spec']['TaskTemplate']['ContainerSpec']['Image']
+ if 'Ports' in item['Spec']['EndpointSpec']:
+ object_essentials['Ports'] = item['Spec']['EndpointSpec']['Ports']
+ else:
+ object_essentials['Ports'] = []
+
+ return object_essentials
+
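+ # Illustrative shape of one trimmed service record produced above
+ # (all sample values are hypothetical):
+ #
+ # {'ID': 'x6llmvnl3ol6', 'Name': 'web', 'Mode': 'Replicated',
+ # 'Replicas': 3, 'Image': 'nginx:1.13', 'Ports': []}
+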
+ def get_docker_swarm_unlock_key(self):
+ unlock_key = self.client.get_unlock_key() or {}
+ return unlock_key.get('UnlockKey') or None
+
+
+def main():
+ argument_spec = dict(
+ nodes=dict(type='bool', default=False),
+ nodes_filters=dict(type='dict'),
+ tasks=dict(type='bool', default=False),
+ tasks_filters=dict(type='dict'),
+ services=dict(type='bool', default=False),
+ services_filters=dict(type='dict'),
+ unlock_key=dict(type='bool', default=False),
+ verbose_output=dict(type='bool', default=False),
+ )
+ option_minimal_versions = dict(
+ unlock_key=dict(docker_py_version='2.7.0', docker_api_version='1.25'),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.24',
+ option_minimal_versions=option_minimal_versions,
+ fail_results=dict(
+ can_talk_to_docker=False,
+ docker_swarm_active=False,
+ docker_swarm_manager=False,
+ ),
+ )
+ client.fail_results['can_talk_to_docker'] = True
+ client.fail_results['docker_swarm_active'] = client.check_if_swarm_node()
+ client.fail_results['docker_swarm_manager'] = client.check_if_swarm_manager()
+
+ try:
+ results = dict(
+ changed=False,
+ )
+
+ DockerSwarmManager(client, results)
+ results.update(client.fail_results)
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py
new file mode 100644
index 00000000..f8a4bd00
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py
@@ -0,0 +1,2800 @@
+#!/usr/bin/python
+#
+# (c) 2017, Dario Zanzico (git@dariozanzico.com)
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service
+author:
+ - "Dario Zanzico (@dariko)"
+ - "Jason Witkowski (@jwitko)"
+ - "Hannes Ljungberg (@hannseman)"
+short_description: docker swarm service
+description:
+ - Manages docker services via a swarm manager node.
+options:
+ args:
+ description:
+ - List of arguments to be passed to the container.
+ - Corresponds to the C(ARG) parameter of C(docker service create).
+ type: list
+ elements: str
+ command:
+ description:
+ - Command to execute when the container starts.
+ - A command may be either a string or a list of strings.
+ - Corresponds to the C(COMMAND) parameter of C(docker service create).
+ type: raw
+ configs:
+ description:
+ - List of dictionaries describing the service configs.
+ - Corresponds to the C(--config) option of C(docker service create).
+ - Requires API version >= 1.30.
+ type: list
+ elements: dict
+ suboptions:
+ config_id:
+ description:
+ - Config's ID.
+ type: str
+ config_name:
+ description:
+ - Config's name as defined at its creation.
+ type: str
+ required: yes
+ filename:
+ description:
+ - Name of the file containing the config. Defaults to the I(config_name) if not specified.
+ type: str
+ uid:
+ description:
+ - UID of the config file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the config file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ container_labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--container-label) option of C(docker service create).
+ type: dict
+ dns:
+ description:
+ - List of custom DNS servers.
+ - Corresponds to the C(--dns) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ dns_search:
+ description:
+ - List of custom DNS search domains.
+ - Corresponds to the C(--dns-search) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ dns_options:
+ description:
+ - List of custom DNS options.
+ - Corresponds to the C(--dns-option) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ endpoint_mode:
+ description:
+ - Service endpoint mode.
+ - Corresponds to the C(--endpoint-mode) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ choices:
+ - vip
+ - dnsrr
+ env:
+ description:
+ - List or dictionary of the service environment variables.
+ - If passed a list, each item needs to be in the format C(KEY=VALUE).
+ - If passed a dictionary, values which might be parsed as numbers,
+ booleans or other types by the YAML parser must be quoted (e.g. C("true"))
+ in order to avoid data loss.
+ - Corresponds to the C(--env) option of C(docker service create).
+ type: raw
+ env_files:
+ description:
+ - List of paths to files, present on the target, containing environment variables C(FOO=BAR).
+ - The order of the list is significant in determining the value assigned to a
+ variable that shows up more than once.
+ - If a variable is also present in I(env), the I(env) value will take precedence.
+ type: list
+ elements: path
+ force_update:
+ description:
+ - Force update even if no changes require it.
+ - Corresponds to the C(--force) option of C(docker service update).
+ - Requires API version >= 1.25.
+ type: bool
+ default: no
+ groups:
+ description:
+ - List of additional group names and/or IDs that the container process will run as.
+ - Corresponds to the C(--group) option of C(docker service update).
+ - Requires API version >= 1.25.
+ type: list
+ elements: str
+ healthcheck:
+ description:
+ - Configure a check that is run to determine whether or not containers for this service are "healthy".
+ See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck)
+ for details on how healthchecks work.
+ - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format
+ that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Requires API version >= 1.25.
+ type: dict
+ suboptions:
+ test:
+ description:
+ - Command to run to check health.
+ - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL).
+ type: raw
+ interval:
+ description:
+ - Time between running the check.
+ type: str
+ timeout:
+ description:
+ - Maximum time to allow one check to run.
+ type: str
+ retries:
+ description:
+ - Consecutive failures needed to report unhealthy. It accepts an integer value.
+ type: int
+ start_period:
+ description:
+ - Start period for the container to initialize before starting health-retries countdown.
+ type: str
+ hostname:
+ description:
+ - Container hostname.
+ - Corresponds to the C(--hostname) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ hosts:
+ description:
+ - Dict of host-to-IP mappings, where each host name is a key in the dictionary.
+ Each host name will be added to the container's C(/etc/hosts) file.
+ - Corresponds to the C(--host) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: dict
+ image:
+ description:
+ - Service image path and tag.
+ - Corresponds to the C(IMAGE) parameter of C(docker service create).
+ type: str
+ init:
+ description:
+ - Use an init inside each service container to forward signals and reap processes.
+ - Corresponds to the C(--init) option of C(docker service create).
+ - Requires API version >= 1.37.
+ type: bool
+ labels:
+ description:
+ - Dictionary of key value pairs.
+ - Corresponds to the C(--label) option of C(docker service create).
+ type: dict
+ limits:
+ description:
+ - Configures service resource limits.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU limit. C(0) equals no limit.
+ - Corresponds to the C(--limit-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no limit.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--limit-memory) option of C(docker service create).
+ type: str
+ type: dict
+ logging:
+ description:
+ - "Logging configuration for the service."
+ suboptions:
+ driver:
+ description:
+ - Configure the logging driver for a service.
+ - Corresponds to the C(--log-driver) option of C(docker service create).
+ type: str
+ options:
+ description:
+ - Options for service logging driver.
+ - Corresponds to the C(--log-opt) option of C(docker service create).
+ type: dict
+ type: dict
+ mode:
+ description:
+ - Service replication mode.
+ - Service will be removed and recreated when changed.
+ - Corresponds to the C(--mode) option of C(docker service create).
+ type: str
+ default: replicated
+ choices:
+ - replicated
+ - global
+ mounts:
+ description:
+ - List of dictionaries describing the service mounts.
+ - Corresponds to the C(--mount) option of C(docker service create).
+ type: list
+ elements: dict
+ suboptions:
+ source:
+ description:
+ - Mount source (e.g. a volume name or a host path).
+ - Must be specified if I(type) is not C(tmpfs).
+ type: str
+ target:
+ description:
+ - Container path.
+ type: str
+ required: yes
+ type:
+ description:
+ - The mount type.
+ - Note that C(npipe) is only supported by Docker for Windows. Also note that C(npipe) was added in Ansible 2.9.
+ type: str
+ default: bind
+ choices:
+ - bind
+ - volume
+ - tmpfs
+ - npipe
+ readonly:
+ description:
+ - Whether the mount should be read-only.
+ type: bool
+ labels:
+ description:
+ - Volume labels to apply.
+ type: dict
+ propagation:
+ description:
+ - The propagation mode to use.
+ - Can only be used when I(type) is C(bind).
+ type: str
+ choices:
+ - shared
+ - slave
+ - private
+ - rshared
+ - rslave
+ - rprivate
+ no_copy:
+ description:
+ - Disable copying of data from a container when a volume is created.
+ - Can only be used when I(type) is C(volume).
+ type: bool
+ driver_config:
+ description:
+ - Volume driver configuration.
+ - Can only be used when I(type) is C(volume).
+ suboptions:
+ name:
+ description:
+ - Name of the volume-driver plugin to use for the volume.
+ type: str
+ options:
+ description:
+ - Options as key-value pairs to pass to the driver for this volume.
+ type: dict
+ type: dict
+ tmpfs_size:
+ description:
+ - "Size of the tmpfs mount in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - Can only be used when I(type) is C(tmpfs).
+ type: str
+ tmpfs_mode:
+ description:
+ - File mode of the tmpfs in octal.
+ - Can only be used when I(type) is C(tmpfs).
+ type: int
+ name:
+ description:
+ - Service name.
+ - Corresponds to the C(--name) option of C(docker service create).
+ type: str
+ required: yes
+ networks:
+ description:
+ - List of the service network names or dictionaries.
+ - When passed as dictionaries, valid sub-options are I(name), which is required, and
+ I(aliases) and I(options).
+ - Prior to API version 1.29, updating and removing networks is not supported.
+ If changes are made, the service will then be removed and recreated.
+ - Corresponds to the C(--network) option of C(docker service create).
+ type: list
+ elements: raw
+ placement:
+ description:
+ - Configures service placement preferences and constraints.
+ suboptions:
+ constraints:
+ description:
+ - List of the service constraints.
+ - Corresponds to the C(--constraint) option of C(docker service create).
+ type: list
+ elements: str
+ preferences:
+ description:
+ - List of the placement preferences as key value pairs.
+ - Corresponds to the C(--placement-pref) option of C(docker service create).
+ - Requires API version >= 1.27.
+ type: list
+ elements: dict
+ type: dict
+ publish:
+ description:
+ - List of dictionaries describing the service published ports.
+ - Corresponds to the C(--publish) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ published_port:
+ description:
+ - The port to make externally available.
+ type: int
+ required: yes
+ target_port:
+ description:
+ - The port inside the container to expose.
+ type: int
+ required: yes
+ protocol:
+ description:
+ - What protocol to use.
+ type: str
+ default: tcp
+ choices:
+ - tcp
+ - udp
+ mode:
+ description:
+ - What publish mode to use.
+ - Requires API version >= 1.32.
+ type: str
+ choices:
+ - ingress
+ - host
+ read_only:
+ description:
+ - Mount the container's root filesystem as read-only.
+ - Corresponds to the C(--read-only) option of C(docker service create).
+ type: bool
+ replicas:
+ description:
+ - Number of containers instantiated in the service. Valid only if I(mode) is C(replicated).
+ - If set to C(-1), and service is not present, service replicas will be set to C(1).
+ - If set to C(-1), and service is present, service replicas will be unchanged.
+ - Corresponds to the C(--replicas) option of C(docker service create).
+ type: int
+ default: -1
+ reservations:
+ description:
+ - Configures service resource reservations.
+ suboptions:
+ cpus:
+ description:
+ - Service CPU reservation. C(0) equals no reservation.
+ - Corresponds to the C(--reserve-cpu) option of C(docker service create).
+ type: float
+ memory:
+ description:
+ - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer.
+ Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte),
+ C(T) (tebibyte), or C(P) (pebibyte)."
+ - C(0) equals no reservation.
+ - Omitting the unit defaults to bytes.
+ - Corresponds to the C(--reserve-memory) option of C(docker service create).
+ type: str
+ type: dict
+ resolve_image:
+ description:
+ - Whether the current image digest should be resolved from the registry and updated if changed.
+ - Requires API version >= 1.30.
+ type: bool
+ default: no
+ restart_config:
+ description:
+ - Configures if and how to restart containers when they exit.
+ suboptions:
+ condition:
+ description:
+ - Restart condition of the service.
+ - Corresponds to the C(--restart-condition) option of C(docker service create).
+ type: str
+ choices:
+ - none
+ - on-failure
+ - any
+ delay:
+ description:
+ - Delay between restarts.
+ - "Accepts a a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-delay) option of C(docker service create).
+ type: str
+ max_attempts:
+ description:
+ - Maximum number of service restarts.
+ - Corresponds to the C(--restart-max-attempts) option of C(docker service create).
+ type: int
+ window:
+ description:
+ - Restart policy evaluation window.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--restart-window) option of C(docker service create).
+ type: str
+ type: dict
+ rollback_config:
+ description:
+ - Configures how the service should be rolled back in case of a failing update.
+ suboptions:
+ parallelism:
+ description:
+ - The number of containers to roll back at a time. If set to C(0), all containers roll back simultaneously.
+ - Corresponds to the C(--rollback-parallelism) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: int
+ delay:
+ description:
+ - Delay between task rollbacks.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-delay) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of rollback failure.
+ - Corresponds to the C(--rollback-failure-action) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ choices:
+ - continue
+ - pause
+ monitor:
+ description:
+ - Duration after each task rollback to monitor for failure.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--rollback-monitor) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during a rollback.
+ - Corresponds to the C(--rollback-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.28.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations during rollbacks.
+ - Corresponds to the C(--rollback-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ secrets:
+ description:
+ - List of dictionaries describing the service secrets.
+ - Corresponds to the C(--secret) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: list
+ elements: dict
+ suboptions:
+ secret_id:
+ description:
+ - Secret's ID.
+ type: str
+ secret_name:
+ description:
+ - Secret's name as defined at its creation.
+ type: str
+ required: yes
+ filename:
+ description:
+ - Name of the file containing the secret. Defaults to the I(secret_name) if not specified.
+ - Corresponds to the C(target) key of C(docker service create --secret).
+ type: str
+ uid:
+ description:
+ - UID of the secret file's owner.
+ type: str
+ gid:
+ description:
+ - GID of the secret file's group.
+ type: str
+ mode:
+ description:
+ - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)).
+ type: int
+ state:
+ description:
+ - C(absent) - A service matching the specified name will be removed and have its tasks stopped.
+ - C(present) - Asserts the existence of a service matching the name and provided configuration parameters.
+ Unspecified configuration parameters will be set to docker defaults.
+ type: str
+ default: present
+ choices:
+ - present
+ - absent
+ stop_grace_period:
+ description:
+ - Time to wait before force killing a container.
+ - "Accepts a duration as a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--stop-grace-period) option of C(docker service create).
+ type: str
+ stop_signal:
+ description:
+ - Override default signal used to stop the container.
+ - Corresponds to the C(--stop-signal) option of C(docker service create).
+ type: str
+ tty:
+ description:
+ - Allocate a pseudo-TTY.
+ - Corresponds to the C(--tty) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: bool
+ update_config:
+ description:
+ - Configures how the service should be updated. Useful for configuring rolling updates.
+ suboptions:
+ parallelism:
+ description:
+ - Rolling update parallelism.
+ - Corresponds to the C(--update-parallelism) option of C(docker service create).
+ type: int
+ delay:
+ description:
+ - Rolling update delay.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-delay) option of C(docker service create).
+ type: str
+ failure_action:
+ description:
+ - Action to take in case of container failure.
+ - Corresponds to the C(--update-failure-action) option of C(docker service create).
+ - Usage of I(rollback) requires API version >= 1.29.
+ type: str
+ choices:
+ - continue
+ - pause
+ - rollback
+ monitor:
+ description:
+ - Time to monitor updated tasks for failures.
+ - "Accepts a string in a format that look like:
+ C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)."
+ - Corresponds to the C(--update-monitor) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: str
+ max_failure_ratio:
+ description:
+ - Fraction of tasks that may fail during an update before the failure action is invoked.
+ - Corresponds to the C(--update-max-failure-ratio) option of C(docker service create).
+ - Requires API version >= 1.25.
+ type: float
+ order:
+ description:
+ - Specifies the order of operations when rolling out an updated task.
+ - Corresponds to the C(--update-order) option of C(docker service create).
+ - Requires API version >= 1.29.
+ type: str
+ type: dict
+ user:
+ description:
+ - Sets the username or UID used for the specified command.
+ - Before Ansible 2.8, the default value for this option was C(root).
+ - The default has been removed so that the user defined in the image is used if no user is specified here.
+ - Corresponds to the C(--user) option of C(docker service create).
+ type: str
+ working_dir:
+ description:
+ - Path to the working directory.
+ - Corresponds to the C(--workdir) option of C(docker service create).
+ type: str
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_2_documentation
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.2"
+ - "Docker API >= 1.24"
+notes:
+ - "Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0.
+ When using older versions, use C(force_update: true) to trigger the swarm to resolve a new image."
+'''
+
+RETURN = '''
+swarm_service:
+ returned: always
+ type: dict
+ description:
+ - Dictionary of variables representing the current state of the service.
+ Matches the module parameters format.
+ - Note that facts are not part of registered vars but accessible directly.
+ - Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service),
+ while the module actually returned a variable called C(ansible_docker_service). The variable
+ was renamed to C(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0.
+ In Ansible 2.7.x, the old name C(ansible_docker_service) can still be used.
+ sample: '{
+ "args": [
+ "3600"
+ ],
+ "command": [
+ "sleep"
+ ],
+ "configs": null,
+ "constraints": [
+ "node.role == manager",
+ "engine.labels.operatingsystem == ubuntu 14.04"
+ ],
+ "container_labels": null,
+ "dns": null,
+ "dns_options": null,
+ "dns_search": null,
+ "endpoint_mode": null,
+ "env": [
+ "ENVVAR1=envvar1",
+ "ENVVAR2=envvar2"
+ ],
+ "force_update": null,
+ "groups": null,
+ "healthcheck": {
+ "interval": 90000000000,
+ "retries": 3,
+ "start_period": 30000000000,
+ "test": [
+ "CMD",
+ "curl",
+ "--fail",
+ "http://nginx.host.com"
+ ],
+ "timeout": 10000000000
+ },
+ "healthcheck_disabled": false,
+ "hostname": null,
+ "hosts": null,
+ "image": "alpine:latest@sha256:b3dbf31b77fd99d9c08f780ce6f5282aba076d70a513a8be859d8d3a4d0c92b8",
+ "labels": {
+ "com.example.department": "Finance",
+ "com.example.description": "Accounting webapp"
+ },
+ "limit_cpu": 0.5,
+ "limit_memory": 52428800,
+ "log_driver": "fluentd",
+ "log_driver_options": {
+ "fluentd-address": "127.0.0.1:24224",
+ "fluentd-async-connect": "true",
+ "tag": "myservice"
+ },
+ "mode": "replicated",
+ "mounts": [
+ {
+ "readonly": false,
+ "source": "/tmp/",
+ "target": "/remote_tmp/",
+ "type": "bind",
+ "labels": null,
+ "propagation": null,
+ "no_copy": null,
+ "driver_config": null,
+ "tmpfs_size": null,
+ "tmpfs_mode": null
+ }
+ ],
+ "networks": null,
+ "placement_preferences": [
+ {
+ "spread": "node.labels.mylabel"
+ }
+ ],
+ "publish": null,
+ "read_only": null,
+ "replicas": 1,
+ "reserve_cpu": 0.25,
+ "reserve_memory": 20971520,
+ "restart_policy": "on-failure",
+ "restart_policy_attempts": 3,
+ "restart_policy_delay": 5000000000,
+ "restart_policy_window": 120000000000,
+ "secrets": null,
+ "stop_grace_period": null,
+ "stop_signal": null,
+ "tty": null,
+ "update_delay": 10000000000,
+ "update_failure_action": null,
+ "update_max_failure_ratio": null,
+ "update_monitor": null,
+ "update_order": "stop-first",
+ "update_parallelism": 2,
+ "user": null,
+ "working_dir": null
+ }'
+changes:
+ returned: always
+ description:
+ - List of changed service attributes if a service has been altered, [] otherwise.
+ type: list
+ elements: str
+ sample: ['container_labels', 'replicas']
+rebuilt:
+ returned: always
+ description:
+ - True if the service has been recreated (removed and created).
+ type: bool
+ sample: True
+'''
+
+EXAMPLES = '''
+- name: Set command and arguments
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ command: sleep
+ args:
+ - "3600"
+
+- name: Set a bind mount
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ mounts:
+ - source: /tmp/
+ target: /remote_tmp/
+ type: bind
+
+- name: Set service labels
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ labels:
+ com.example.description: "Accounting webapp"
+ com.example.department: "Finance"
+
+- name: Set environment variables
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ env:
+ ENVVAR1: envvar1
+ ENVVAR2: envvar2
+ env_files:
+ - envs/common.env
+ - envs/apps/web.env
+
+- name: Set fluentd logging
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ logging:
+ driver: fluentd
+ options:
+ fluentd-address: "127.0.0.1:24224"
+ fluentd-async-connect: "true"
+ tag: myservice
+
+- name: Set restart policies
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ restart_config:
+ condition: on-failure
+ delay: 5s
+ max_attempts: 3
+ window: 120s
+
+- name: Set update config
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set rollback config
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ update_config:
+ failure_action: rollback
+ rollback_config:
+ parallelism: 2
+ delay: 10s
+ order: stop-first
+
+- name: Set placement preferences
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ placement:
+ preferences:
+ - spread: node.labels.mylabel
+ constraints:
+ - node.role == manager
+ - engine.labels.operatingsystem == ubuntu 14.04
+
+- name: Set configs
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ configs:
+ - config_name: myconfig_name
+ filename: "/tmp/config.txt"
+
+- name: Set networks
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - mynetwork
+
+- name: Set networks as a dictionary
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ networks:
+ - name: "mynetwork"
+ aliases:
+ - "mynetwork_alias"
+ options:
+ foo: bar
+
+- name: Set secrets
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ secrets:
+ - secret_name: mysecret_name
+ filename: "/run/secrets/secret.txt"
+
+- name: Start service with healthcheck
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: nginx:1.13
+ healthcheck:
+ # Check if nginx server is healthy by curl'ing the server.
+ # If this fails or times out, the healthcheck fails.
+ test: ["CMD", "curl", "--fail", "http://nginx.host.com"]
+ interval: 1m30s
+ timeout: 10s
+ retries: 3
+ start_period: 30s
+
+- name: Configure service resources
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine:edge
+ reservations:
+ cpus: 0.25
+ memory: 20M
+ limits:
+ cpus: 0.50
+ memory: 50M
+
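+# Illustrative sketch, not part of the original examples; the port numbers
+# used here are assumptions.
+- name: Publish a container port
+ community.docker.docker_swarm_service:
+ name: myservice
+ image: alpine
+ publish:
+ - published_port: 8080
+ target_port: 80
+ protocol: tcp
+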
+- name: Remove service
+ community.docker.docker_swarm_service:
+ name: myservice
+ state: absent
+'''
+
+import shlex
+import time
+import operator
+import traceback
+
+from distutils.version import LooseVersion
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ DifferenceTracker,
+ DockerBaseClass,
+ convert_duration_to_nanosecond,
+ parse_healthcheck,
+ clean_dict_booleans_for_docker_api,
+ RequestException,
+)
+
+from ansible.module_utils.basic import human_to_bytes
+from ansible.module_utils.six import string_types
+from ansible.module_utils._text import to_text
+
+try:
+ from docker import types
+ from docker.utils import (
+ parse_repository_tag,
+ parse_env_file,
+ format_environment,
+ )
+ from docker.errors import (
+ APIError,
+ DockerException,
+ NotFound,
+ )
+except ImportError:
+ # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
+ pass
+
+
+def get_docker_environment(env, env_files):
+ """
+ Will return a list of "KEY=VALUE" items. Supplied env variable can
+ be either a list or a dictionary.
+
+ If environment files are combined with explicit environment variables,
+ the explicit environment variables take precedence.
+ """
+ env_dict = {}
+ if env_files:
+ for env_file in env_files:
+ parsed_env_file = parse_env_file(env_file)
+ for name, value in parsed_env_file.items():
+ env_dict[name] = str(value)
+ if env is not None and isinstance(env, string_types):
+ env = env.split(',')
+ if env is not None and isinstance(env, dict):
+ for name, value in env.items():
+ if not isinstance(value, string_types):
+ raise ValueError(
+ 'Non-string value found for env option. '
+ 'Ambiguous env options must be wrapped in quotes to avoid YAML parsing. Key: %s' % name
+ )
+ env_dict[name] = str(value)
+ elif env is not None and isinstance(env, list):
+ for item in env:
+ try:
+ name, value = item.split('=', 1)
+ except ValueError:
+ raise ValueError('Invalid environment variable found in list, needs to be in format KEY=VALUE.')
+ env_dict[name] = value
+ elif env is not None:
+ raise ValueError(
+ 'Invalid type for env %s (%s). Only list or dict allowed.' % (env, type(env))
+ )
+ env_list = format_environment(env_dict)
+ if not env_list:
+ if env is not None or env_files is not None:
+ return []
+ else:
+ return None
+ return sorted(env_list)
+
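+# Illustrative usage of the helper above (file name and values are
+# hypothetical); explicit env variables win over env_files entries:
+#
+# get_docker_environment({'FOO': 'from_task'}, ['envs/common.env'])
+# # -> ['FOO=from_task'] even if envs/common.env sets FOO=from_file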
+
+def get_docker_networks(networks, network_ids):
+ """
+ Validate a list of network names or a list of network dictionaries.
+ Network names will be resolved to ids by using the network_ids mapping.
+ """
+ if networks is None:
+ return None
+ parsed_networks = []
+ for network in networks:
+ if isinstance(network, string_types):
+ parsed_network = {'name': network}
+ elif isinstance(network, dict):
+ if 'name' not in network:
+ raise TypeError(
+ '"name" is required when networks are passed as dictionaries.'
+ )
+ name = network.pop('name')
+ parsed_network = {'name': name}
+ aliases = network.pop('aliases', None)
+ if aliases is not None:
+ if not isinstance(aliases, list):
+ raise TypeError('"aliases" network option is only allowed as a list')
+ if not all(
+ isinstance(alias, string_types) for alias in aliases
+ ):
+ raise TypeError('Only strings are allowed as network aliases.')
+ parsed_network['aliases'] = aliases
+ options = network.pop('options', None)
+ if options is not None:
+ if not isinstance(options, dict):
+ raise TypeError('Only dict is allowed as network options.')
+ parsed_network['options'] = clean_dict_booleans_for_docker_api(options)
+ # Check if any invalid keys are left
+ if network:
+ invalid_keys = ', '.join(network.keys())
+ raise TypeError(
+ '%s are not valid keys for the networks option' % invalid_keys
+ )
+ else:
+ raise TypeError(
+ 'Only a list of strings or dictionaries are allowed to be passed as networks.'
+ )
+ network_name = parsed_network.pop('name')
+ try:
+ parsed_network['id'] = network_ids[network_name]
+ except KeyError as e:
+ raise ValueError('Could not find a network named: %s.' % e)
+ parsed_networks.append(parsed_network)
+ return parsed_networks or []
+
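+# Illustrative usage (hypothetical network id); names are resolved through
+# the network_ids mapping built by the caller:
+#
+# get_docker_networks(['mynetwork'], {'mynetwork': 'abc123'})
+# # -> [{'id': 'abc123'}]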
+
+def get_nanoseconds_from_raw_option(name, value):
+ if value is None:
+ return None
+ elif isinstance(value, int):
+ return value
+ elif isinstance(value, string_types):
+ try:
+ return int(value)
+ except ValueError:
+ return convert_duration_to_nanosecond(value)
+ else:
+ raise ValueError(
+ 'Invalid type for %s %s (%s). Only string or int allowed.'
+ % (name, value, type(value))
+ )
+
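+# Illustrative conversions performed by the helper above:
+#
+# get_nanoseconds_from_raw_option('update_delay', '1m30s') # -> 90000000000
+# get_nanoseconds_from_raw_option('update_delay', 90) # -> 90 (already an int)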
+
+def get_value(key, values, default=None):
+ value = values.get(key)
+ return value if value is not None else default
+
+
+def has_dict_changed(new_dict, old_dict):
+ """
+ Check if new_dict has differences compared to old_dict while
+ ignoring keys in old_dict which are None in new_dict.
+ """
+ if new_dict is None:
+ return False
+ if not new_dict and old_dict:
+ return True
+ if not old_dict and new_dict:
+ return True
+ defined_options = dict(
+ (option, value) for option, value in new_dict.items()
+ if value is not None
+ )
+ for option, value in defined_options.items():
+ old_value = old_dict.get(option)
+ if not value and not old_value:
+ continue
+ if value != old_value:
+ return True
+ return False
+
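+# Illustrative behavior (hypothetical dicts): keys that are None in the new
+# dict count as "not specified" and are ignored:
+#
+# has_dict_changed({'uid': None, 'gid': '0'}, {'uid': '100', 'gid': '0'})
+# # -> False, because only the non-None 'gid' value is compared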
+
+def has_list_changed(new_list, old_list, sort_lists=True, sort_key=None):
+ """
+ Check whether two lists have differences. Lists are sorted by default.
+ """
+
+ def sort_list(unsorted_list):
+ """
+ Sort a given list.
+ The list may contain dictionaries, so use the sort key to handle them.
+ """
+
+ if unsorted_list and isinstance(unsorted_list[0], dict):
+ if not sort_key:
+ raise Exception(
+ 'A sort key was not specified when sorting list'
+ )
+ else:
+ return sorted(unsorted_list, key=lambda k: k[sort_key])
+
+ # Either the list is empty or does not contain dictionaries
+ try:
+ return sorted(unsorted_list)
+ except TypeError:
+ return unsorted_list
+
+ if new_list is None:
+ return False
+ old_list = old_list or []
+ if len(new_list) != len(old_list):
+ return True
+
+ if sort_lists:
+ zip_data = zip(sort_list(new_list), sort_list(old_list))
+ else:
+ zip_data = zip(new_list, old_list)
+ for new_item, old_item in zip_data:
+ is_same_type = type(new_item) == type(old_item)
+ if not is_same_type:
+ if isinstance(new_item, string_types) and isinstance(old_item, string_types):
+ # Even though the types are different between these items,
+ # they are both strings. Try matching on the same string type.
+ try:
+ new_item_type = type(new_item)
+ old_item_casted = new_item_type(old_item)
+ if new_item != old_item_casted:
+ return True
+ else:
+ continue
+ except UnicodeEncodeError:
+ # Fallback to assuming the strings are different
+ return True
+ else:
+ return True
+ if isinstance(new_item, dict):
+ if has_dict_changed(new_item, old_item):
+ return True
+ elif new_item != old_item:
+ return True
+
+ return False
+
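+# Illustrative behavior (hypothetical lists): lists are sorted before
+# comparison unless sort_lists=False:
+#
+# has_list_changed(['b', 'a'], ['a', 'b']) # -> False
+# has_list_changed(['b', 'a'], ['a', 'b'], sort_lists=False) # -> True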
+
+def have_networks_changed(new_networks, old_networks):
+ """Special case list checking for networks to sort aliases"""
+
+ if new_networks is None:
+ return False
+ old_networks = old_networks or []
+ if len(new_networks) != len(old_networks):
+ return True
+
+ zip_data = zip(
+ sorted(new_networks, key=lambda k: k['id']),
+ sorted(old_networks, key=lambda k: k['id'])
+ )
+
+ for new_item, old_item in zip_data:
+ new_item = dict(new_item)
+ old_item = dict(old_item)
+ # Sort the aliases
+ if 'aliases' in new_item:
+ new_item['aliases'] = sorted(new_item['aliases'] or [])
+ if 'aliases' in old_item:
+ old_item['aliases'] = sorted(old_item['aliases'] or [])
+
+ if has_dict_changed(new_item, old_item):
+ return True
+
+ return False
+
+
+class DockerService(DockerBaseClass):
+ def __init__(self, docker_api_version, docker_py_version):
+ super(DockerService, self).__init__()
+ self.image = ""
+ self.command = None
+ self.args = None
+ self.endpoint_mode = None
+ self.dns = None
+ self.healthcheck = None
+ self.healthcheck_disabled = None
+ self.hostname = None
+ self.hosts = None
+ self.tty = None
+ self.dns_search = None
+ self.dns_options = None
+ self.env = None
+ self.force_update = None
+ self.groups = None
+ self.log_driver = None
+ self.log_driver_options = None
+ self.labels = None
+ self.container_labels = None
+ self.limit_cpu = None
+ self.limit_memory = None
+ self.reserve_cpu = None
+ self.reserve_memory = None
+ self.mode = "replicated"
+ self.user = None
+ self.mounts = None
+ self.configs = None
+ self.secrets = None
+ self.constraints = None
+ self.networks = None
+ self.stop_grace_period = None
+ self.stop_signal = None
+ self.publish = None
+ self.placement_preferences = None
+ self.replicas = -1
+ self.service_id = False
+ self.service_version = False
+ self.read_only = None
+ self.restart_policy = None
+ self.restart_policy_attempts = None
+ self.restart_policy_delay = None
+ self.restart_policy_window = None
+ self.rollback_config = None
+ self.update_delay = None
+ self.update_parallelism = None
+ self.update_failure_action = None
+ self.update_monitor = None
+ self.update_max_failure_ratio = None
+ self.update_order = None
+ self.working_dir = None
+ self.init = None
+
+ self.docker_api_version = docker_api_version
+ self.docker_py_version = docker_py_version
+
+ def get_facts(self):
+ return {
+ 'image': self.image,
+ 'mounts': self.mounts,
+ 'configs': self.configs,
+ 'networks': self.networks,
+ 'command': self.command,
+ 'args': self.args,
+ 'tty': self.tty,
+ 'dns': self.dns,
+ 'dns_search': self.dns_search,
+ 'dns_options': self.dns_options,
+ 'healthcheck': self.healthcheck,
+ 'healthcheck_disabled': self.healthcheck_disabled,
+ 'hostname': self.hostname,
+ 'hosts': self.hosts,
+ 'env': self.env,
+ 'force_update': self.force_update,
+ 'groups': self.groups,
+ 'log_driver': self.log_driver,
+ 'log_driver_options': self.log_driver_options,
+ 'publish': self.publish,
+ 'constraints': self.constraints,
+ 'placement_preferences': self.placement_preferences,
+ 'labels': self.labels,
+ 'container_labels': self.container_labels,
+ 'mode': self.mode,
+ 'replicas': self.replicas,
+ 'endpoint_mode': self.endpoint_mode,
+ 'restart_policy': self.restart_policy,
+ 'secrets': self.secrets,
+ 'stop_grace_period': self.stop_grace_period,
+ 'stop_signal': self.stop_signal,
+ 'limit_cpu': self.limit_cpu,
+ 'limit_memory': self.limit_memory,
+ 'read_only': self.read_only,
+ 'reserve_cpu': self.reserve_cpu,
+ 'reserve_memory': self.reserve_memory,
+ 'restart_policy_delay': self.restart_policy_delay,
+ 'restart_policy_attempts': self.restart_policy_attempts,
+ 'restart_policy_window': self.restart_policy_window,
+ 'rollback_config': self.rollback_config,
+ 'update_delay': self.update_delay,
+ 'update_parallelism': self.update_parallelism,
+ 'update_failure_action': self.update_failure_action,
+ 'update_monitor': self.update_monitor,
+ 'update_max_failure_ratio': self.update_max_failure_ratio,
+ 'update_order': self.update_order,
+ 'user': self.user,
+ 'working_dir': self.working_dir,
+ 'init': self.init,
+ }
+
+ @property
+ def can_update_networks(self):
+ # Before Docker API 1.29 adding/removing networks was not supported
+ return (
+ self.docker_api_version >= LooseVersion('1.29') and
+ self.docker_py_version >= LooseVersion('2.7')
+ )
+
+ @property
+ def can_use_task_template_networks(self):
+ # In Docker API 1.25 attaching networks to TaskTemplate is preferred over Spec
+ return (
+ self.docker_api_version >= LooseVersion('1.25') and
+ self.docker_py_version >= LooseVersion('2.7')
+ )
+
+ @staticmethod
+ def get_restart_config_from_ansible_params(params):
+ restart_config = params['restart_config'] or {}
+ condition = get_value(
+ 'condition',
+ restart_config,
+ )
+ delay = get_value(
+ 'delay',
+ restart_config,
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'restart_policy_delay',
+ delay
+ )
+ max_attempts = get_value(
+ 'max_attempts',
+ restart_config,
+ )
+ window = get_value(
+ 'window',
+ restart_config,
+ )
+ window = get_nanoseconds_from_raw_option(
+ 'restart_policy_window',
+ window
+ )
+ return {
+ 'restart_policy': condition,
+ 'restart_policy_delay': delay,
+ 'restart_policy_attempts': max_attempts,
+ 'restart_policy_window': window
+ }
+
+ @staticmethod
+ def get_update_config_from_ansible_params(params):
+ update_config = params['update_config'] or {}
+ parallelism = get_value(
+ 'parallelism',
+ update_config,
+ )
+ delay = get_value(
+ 'delay',
+ update_config,
+ )
+ delay = get_nanoseconds_from_raw_option(
+ 'update_delay',
+ delay
+ )
+ failure_action = get_value(
+ 'failure_action',
+ update_config,
+ )
+ monitor = get_value(
+ 'monitor',
+ update_config,
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'update_monitor',
+ monitor
+ )
+ max_failure_ratio = get_value(
+ 'max_failure_ratio',
+ update_config,
+ )
+ order = get_value(
+ 'order',
+ update_config,
+ )
+ return {
+ 'update_parallelism': parallelism,
+ 'update_delay': delay,
+ 'update_failure_action': failure_action,
+ 'update_monitor': monitor,
+ 'update_max_failure_ratio': max_failure_ratio,
+ 'update_order': order
+ }
+
+ @staticmethod
+ def get_rollback_config_from_ansible_params(params):
+ if params['rollback_config'] is None:
+ return None
+ rollback_config = params['rollback_config'] or {}
+ delay = get_nanoseconds_from_raw_option(
+ 'rollback_config.delay',
+ rollback_config.get('delay')
+ )
+ monitor = get_nanoseconds_from_raw_option(
+ 'rollback_config.monitor',
+ rollback_config.get('monitor')
+ )
+ return {
+ 'parallelism': rollback_config.get('parallelism'),
+ 'delay': delay,
+ 'failure_action': rollback_config.get('failure_action'),
+ 'monitor': monitor,
+ 'max_failure_ratio': rollback_config.get('max_failure_ratio'),
+ 'order': rollback_config.get('order'),
+ }
+
+ @staticmethod
+ def get_logging_from_ansible_params(params):
+ logging_config = params['logging'] or {}
+ driver = get_value(
+ 'driver',
+ logging_config,
+ )
+ options = get_value(
+ 'options',
+ logging_config,
+ )
+ return {
+ 'log_driver': driver,
+ 'log_driver_options': options,
+ }
+
+ @staticmethod
+ def get_limits_from_ansible_params(params):
+ limits = params['limits'] or {}
+ cpus = get_value(
+ 'cpus',
+ limits,
+ )
+ memory = get_value(
+ 'memory',
+ limits,
+ )
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert limit_memory to bytes: %s' % exc)
+ return {
+ 'limit_cpu': cpus,
+ 'limit_memory': memory,
+ }
+
+ @staticmethod
+ def get_reservations_from_ansible_params(params):
+ reservations = params['reservations'] or {}
+ cpus = get_value(
+ 'cpus',
+ reservations,
+ )
+ memory = get_value(
+ 'memory',
+ reservations,
+ )
+
+ if memory is not None:
+ try:
+ memory = human_to_bytes(memory)
+ except ValueError as exc:
+ raise Exception('Failed to convert reserve_memory to bytes: %s' % exc)
+ return {
+ 'reserve_cpu': cpus,
+ 'reserve_memory': memory,
+ }
+
+ @staticmethod
+ def get_placement_from_ansible_params(params):
+ placement = params['placement'] or {}
+ constraints = get_value(
+ 'constraints',
+ placement
+ )
+
+ preferences = placement.get('preferences')
+ return {
+ 'constraints': constraints,
+ 'placement_preferences': preferences,
+ }
+
+ @classmethod
+ def from_ansible_params(
+ cls,
+ ap,
+ old_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ docker_api_version,
+ docker_py_version,
+ ):
+ s = DockerService(docker_api_version, docker_py_version)
+ s.image = image_digest
+ s.args = ap['args']
+ s.endpoint_mode = ap['endpoint_mode']
+ s.dns = ap['dns']
+ s.dns_search = ap['dns_search']
+ s.dns_options = ap['dns_options']
+ s.healthcheck, s.healthcheck_disabled = parse_healthcheck(ap['healthcheck'])
+ s.hostname = ap['hostname']
+ s.hosts = ap['hosts']
+ s.tty = ap['tty']
+ s.labels = ap['labels']
+ s.container_labels = ap['container_labels']
+ s.mode = ap['mode']
+ s.stop_signal = ap['stop_signal']
+ s.user = ap['user']
+ s.working_dir = ap['working_dir']
+ s.read_only = ap['read_only']
+ s.init = ap['init']
+
+ s.networks = get_docker_networks(ap['networks'], network_ids)
+
+ s.command = ap['command']
+ if isinstance(s.command, string_types):
+ s.command = shlex.split(s.command)
+ elif isinstance(s.command, list):
+ invalid_items = [
+ (index, item)
+ for index, item in enumerate(s.command)
+ if not isinstance(item, string_types)
+ ]
+ if invalid_items:
+ errors = ', '.join(
+ [
+ '%s (%s) at index %s' % (item, type(item), index)
+ for index, item in invalid_items
+ ]
+ )
+ raise Exception(
+ 'All items in a command list need to be strings. '
+ 'Check quoting. Invalid items: %s.'
+ % errors
+ )
+ s.command = ap['command']
+ elif s.command is not None:
+ raise ValueError(
+ 'Invalid type for command %s (%s). '
+ 'Only string or list allowed. Check quoting.'
+ % (s.command, type(s.command))
+ )
+
+ s.env = get_docker_environment(ap['env'], ap['env_files'])
+ s.rollback_config = cls.get_rollback_config_from_ansible_params(ap)
+
+ update_config = cls.get_update_config_from_ansible_params(ap)
+ for key, value in update_config.items():
+ setattr(s, key, value)
+
+ restart_config = cls.get_restart_config_from_ansible_params(ap)
+ for key, value in restart_config.items():
+ setattr(s, key, value)
+
+ logging_config = cls.get_logging_from_ansible_params(ap)
+ for key, value in logging_config.items():
+ setattr(s, key, value)
+
+ limits = cls.get_limits_from_ansible_params(ap)
+ for key, value in limits.items():
+ setattr(s, key, value)
+
+ reservations = cls.get_reservations_from_ansible_params(ap)
+ for key, value in reservations.items():
+ setattr(s, key, value)
+
+ placement = cls.get_placement_from_ansible_params(ap)
+ for key, value in placement.items():
+ setattr(s, key, value)
+
+ if ap['stop_grace_period'] is not None:
+ s.stop_grace_period = convert_duration_to_nanosecond(ap['stop_grace_period'])
+
+ if ap['force_update']:
+ s.force_update = int(str(time.time()).replace('.', ''))
+
+ if ap['groups'] is not None:
+ # In case integers are passed as groups, we need to convert them to
+ # strings as docker internally treats them as strings.
+ s.groups = [str(g) for g in ap['groups']]
+
+ if ap['replicas'] == -1:
+ if old_service:
+ s.replicas = old_service.replicas
+ else:
+ s.replicas = 1
+ else:
+ s.replicas = ap['replicas']
+
+ if ap['publish'] is not None:
+ s.publish = []
+ for param_p in ap['publish']:
+ service_p = {}
+ service_p['protocol'] = param_p['protocol']
+ service_p['mode'] = param_p['mode']
+ service_p['published_port'] = param_p['published_port']
+ service_p['target_port'] = param_p['target_port']
+ s.publish.append(service_p)
+
+ if ap['mounts'] is not None:
+ s.mounts = []
+ for param_m in ap['mounts']:
+ service_m = {}
+ service_m['readonly'] = param_m['readonly']
+ service_m['type'] = param_m['type']
+ if param_m['source'] is None and param_m['type'] != 'tmpfs':
+ raise ValueError('Source must be specified for mounts which are not of type tmpfs')
+ service_m['source'] = param_m['source'] or ''
+ service_m['target'] = param_m['target']
+ service_m['labels'] = param_m['labels']
+ service_m['no_copy'] = param_m['no_copy']
+ service_m['propagation'] = param_m['propagation']
+ service_m['driver_config'] = param_m['driver_config']
+ service_m['tmpfs_mode'] = param_m['tmpfs_mode']
+ tmpfs_size = param_m['tmpfs_size']
+ if tmpfs_size is not None:
+ try:
+ tmpfs_size = human_to_bytes(tmpfs_size)
+ except ValueError as exc:
+ raise ValueError(
+ 'Failed to convert tmpfs_size to bytes: %s' % exc
+ )
+
+ service_m['tmpfs_size'] = tmpfs_size
+ s.mounts.append(service_m)
+
+ if ap['configs'] is not None:
+ s.configs = []
+ for param_m in ap['configs']:
+ service_c = {}
+ config_name = param_m['config_name']
+ service_c['config_id'] = param_m['config_id'] or config_ids[config_name]
+ service_c['config_name'] = config_name
+ service_c['filename'] = param_m['filename'] or config_name
+ service_c['uid'] = param_m['uid']
+ service_c['gid'] = param_m['gid']
+ service_c['mode'] = param_m['mode']
+ s.configs.append(service_c)
+
+ if ap['secrets'] is not None:
+ s.secrets = []
+ for param_m in ap['secrets']:
+ service_s = {}
+ secret_name = param_m['secret_name']
+ service_s['secret_id'] = param_m['secret_id'] or secret_ids[secret_name]
+ service_s['secret_name'] = secret_name
+ service_s['filename'] = param_m['filename'] or secret_name
+ service_s['uid'] = param_m['uid']
+ service_s['gid'] = param_m['gid']
+ service_s['mode'] = param_m['mode']
+ s.secrets.append(service_s)
+
+ return s
+
+ def compare(self, os):
+ differences = DifferenceTracker()
+ needs_rebuild = False
+ force_update = False
+ if self.endpoint_mode is not None and self.endpoint_mode != os.endpoint_mode:
+ differences.add('endpoint_mode', parameter=self.endpoint_mode, active=os.endpoint_mode)
+ if has_list_changed(self.env, os.env):
+ differences.add('env', parameter=self.env, active=os.env)
+ if self.log_driver is not None and self.log_driver != os.log_driver:
+ differences.add('log_driver', parameter=self.log_driver, active=os.log_driver)
+ if self.log_driver_options is not None and self.log_driver_options != (os.log_driver_options or {}):
+ differences.add('log_opt', parameter=self.log_driver_options, active=os.log_driver_options)
+ if self.mode != os.mode:
+ needs_rebuild = True
+ differences.add('mode', parameter=self.mode, active=os.mode)
+ if has_list_changed(self.mounts, os.mounts, sort_key='target'):
+ differences.add('mounts', parameter=self.mounts, active=os.mounts)
+ if has_list_changed(self.configs, os.configs, sort_key='config_name'):
+ differences.add('configs', parameter=self.configs, active=os.configs)
+ if has_list_changed(self.secrets, os.secrets, sort_key='secret_name'):
+ differences.add('secrets', parameter=self.secrets, active=os.secrets)
+ if have_networks_changed(self.networks, os.networks):
+ differences.add('networks', parameter=self.networks, active=os.networks)
+ needs_rebuild = not self.can_update_networks
+ if self.replicas != os.replicas:
+ differences.add('replicas', parameter=self.replicas, active=os.replicas)
+ if has_list_changed(self.command, os.command, sort_lists=False):
+ differences.add('command', parameter=self.command, active=os.command)
+ if has_list_changed(self.args, os.args, sort_lists=False):
+ differences.add('args', parameter=self.args, active=os.args)
+ if has_list_changed(self.constraints, os.constraints):
+ differences.add('constraints', parameter=self.constraints, active=os.constraints)
+ if has_list_changed(self.placement_preferences, os.placement_preferences, sort_lists=False):
+ differences.add('placement_preferences', parameter=self.placement_preferences, active=os.placement_preferences)
+ if has_list_changed(self.groups, os.groups):
+ differences.add('groups', parameter=self.groups, active=os.groups)
+ if self.labels is not None and self.labels != (os.labels or {}):
+ differences.add('labels', parameter=self.labels, active=os.labels)
+ if self.limit_cpu is not None and self.limit_cpu != os.limit_cpu:
+ differences.add('limit_cpu', parameter=self.limit_cpu, active=os.limit_cpu)
+ if self.limit_memory is not None and self.limit_memory != os.limit_memory:
+ differences.add('limit_memory', parameter=self.limit_memory, active=os.limit_memory)
+ if self.reserve_cpu is not None and self.reserve_cpu != os.reserve_cpu:
+ differences.add('reserve_cpu', parameter=self.reserve_cpu, active=os.reserve_cpu)
+ if self.reserve_memory is not None and self.reserve_memory != os.reserve_memory:
+ differences.add('reserve_memory', parameter=self.reserve_memory, active=os.reserve_memory)
+ if self.container_labels is not None and self.container_labels != (os.container_labels or {}):
+ differences.add('container_labels', parameter=self.container_labels, active=os.container_labels)
+ if self.stop_signal is not None and self.stop_signal != os.stop_signal:
+ differences.add('stop_signal', parameter=self.stop_signal, active=os.stop_signal)
+ if self.stop_grace_period is not None and self.stop_grace_period != os.stop_grace_period:
+ differences.add('stop_grace_period', parameter=self.stop_grace_period, active=os.stop_grace_period)
+ if self.has_publish_changed(os.publish):
+ differences.add('publish', parameter=self.publish, active=os.publish)
+ if self.read_only is not None and self.read_only != os.read_only:
+ differences.add('read_only', parameter=self.read_only, active=os.read_only)
+ if self.restart_policy is not None and self.restart_policy != os.restart_policy:
+ differences.add('restart_policy', parameter=self.restart_policy, active=os.restart_policy)
+ if self.restart_policy_attempts is not None and self.restart_policy_attempts != os.restart_policy_attempts:
+ differences.add('restart_policy_attempts', parameter=self.restart_policy_attempts, active=os.restart_policy_attempts)
+ if self.restart_policy_delay is not None and self.restart_policy_delay != os.restart_policy_delay:
+ differences.add('restart_policy_delay', parameter=self.restart_policy_delay, active=os.restart_policy_delay)
+ if self.restart_policy_window is not None and self.restart_policy_window != os.restart_policy_window:
+ differences.add('restart_policy_window', parameter=self.restart_policy_window, active=os.restart_policy_window)
+ if has_dict_changed(self.rollback_config, os.rollback_config):
+ differences.add('rollback_config', parameter=self.rollback_config, active=os.rollback_config)
+ if self.update_delay is not None and self.update_delay != os.update_delay:
+ differences.add('update_delay', parameter=self.update_delay, active=os.update_delay)
+ if self.update_parallelism is not None and self.update_parallelism != os.update_parallelism:
+ differences.add('update_parallelism', parameter=self.update_parallelism, active=os.update_parallelism)
+ if self.update_failure_action is not None and self.update_failure_action != os.update_failure_action:
+ differences.add('update_failure_action', parameter=self.update_failure_action, active=os.update_failure_action)
+ if self.update_monitor is not None and self.update_monitor != os.update_monitor:
+ differences.add('update_monitor', parameter=self.update_monitor, active=os.update_monitor)
+ if self.update_max_failure_ratio is not None and self.update_max_failure_ratio != os.update_max_failure_ratio:
+ differences.add('update_max_failure_ratio', parameter=self.update_max_failure_ratio, active=os.update_max_failure_ratio)
+ if self.update_order is not None and self.update_order != os.update_order:
+ differences.add('update_order', parameter=self.update_order, active=os.update_order)
+ has_image_changed, change = self.has_image_changed(os.image)
+ if has_image_changed:
+ differences.add('image', parameter=self.image, active=change)
+ if self.user and self.user != os.user:
+ differences.add('user', parameter=self.user, active=os.user)
+ if has_list_changed(self.dns, os.dns, sort_lists=False):
+ differences.add('dns', parameter=self.dns, active=os.dns)
+ if has_list_changed(self.dns_search, os.dns_search, sort_lists=False):
+ differences.add('dns_search', parameter=self.dns_search, active=os.dns_search)
+ if has_list_changed(self.dns_options, os.dns_options):
+ differences.add('dns_options', parameter=self.dns_options, active=os.dns_options)
+ if self.has_healthcheck_changed(os):
+ differences.add('healthcheck', parameter=self.healthcheck, active=os.healthcheck)
+ if self.hostname is not None and self.hostname != os.hostname:
+ differences.add('hostname', parameter=self.hostname, active=os.hostname)
+ if self.hosts is not None and self.hosts != (os.hosts or {}):
+ differences.add('hosts', parameter=self.hosts, active=os.hosts)
+ if self.tty is not None and self.tty != os.tty:
+ differences.add('tty', parameter=self.tty, active=os.tty)
+ if self.working_dir is not None and self.working_dir != os.working_dir:
+ differences.add('working_dir', parameter=self.working_dir, active=os.working_dir)
+ if self.force_update:
+ force_update = True
+ if self.init is not None and self.init != os.init:
+ differences.add('init', parameter=self.init, active=os.init)
+ return not differences.empty or force_update, differences, needs_rebuild, force_update
+
+ def has_healthcheck_changed(self, old_service):
+ if self.healthcheck_disabled is False and self.healthcheck is None:
+ return False
+ if self.healthcheck_disabled:
+ if old_service.healthcheck is None:
+ return False
+ if old_service.healthcheck.get('test') == ['NONE']:
+ return False
+ return self.healthcheck != old_service.healthcheck
+
+ def has_publish_changed(self, old_publish):
+ if self.publish is None:
+ return False
+ old_publish = old_publish or []
+ if len(self.publish) != len(old_publish):
+ return True
+ publish_sorter = operator.itemgetter('published_port', 'target_port', 'protocol')
+ publish = sorted(self.publish, key=publish_sorter)
+ old_publish = sorted(old_publish, key=publish_sorter)
+ for publish_item, old_publish_item in zip(publish, old_publish):
+ ignored_keys = set()
+ if not publish_item.get('mode'):
+ ignored_keys.add('mode')
+ # Create copies of publish_item dicts where keys specified in ignored_keys are left out
+ filtered_old_publish_item = dict(
+ (k, v) for k, v in old_publish_item.items() if k not in ignored_keys
+ )
+ filtered_publish_item = dict(
+ (k, v) for k, v in publish_item.items() if k not in ignored_keys
+ )
+ if filtered_publish_item != filtered_old_publish_item:
+ return True
+ return False
+
+ def has_image_changed(self, old_image):
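+ # If the requested image carries no explicit digest, compare on
+ # repository:tag only and strip any digest from the running image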
+ if '@' not in self.image:
+ old_image = old_image.split('@')[0]
+ return self.image != old_image, old_image
+
+ def build_container_spec(self):
+ mounts = None
+ if self.mounts is not None:
+ mounts = []
+ for mount_config in self.mounts:
+ mount_options = {
+ 'target': 'target',
+ 'source': 'source',
+ 'type': 'type',
+ 'readonly': 'read_only',
+ 'propagation': 'propagation',
+ 'labels': 'labels',
+ 'no_copy': 'no_copy',
+ 'driver_config': 'driver_config',
+ 'tmpfs_size': 'tmpfs_size',
+ 'tmpfs_mode': 'tmpfs_mode'
+ }
+ mount_args = {}
+ for option, mount_arg in mount_options.items():
+ value = mount_config.get(option)
+ if value is not None:
+ mount_args[mount_arg] = value
+
+ mounts.append(types.Mount(**mount_args))
+
+ configs = None
+ if self.configs is not None:
+ configs = []
+ for config_config in self.configs:
+ config_args = {
+ 'config_id': config_config['config_id'],
+ 'config_name': config_config['config_name']
+ }
+ filename = config_config.get('filename')
+ if filename:
+ config_args['filename'] = filename
+ uid = config_config.get('uid')
+ if uid:
+ config_args['uid'] = uid
+ gid = config_config.get('gid')
+ if gid:
+ config_args['gid'] = gid
+ mode = config_config.get('mode')
+ if mode:
+ config_args['mode'] = mode
+
+ configs.append(types.ConfigReference(**config_args))
+
+ secrets = None
+ if self.secrets is not None:
+ secrets = []
+ for secret_config in self.secrets:
+ secret_args = {
+ 'secret_id': secret_config['secret_id'],
+ 'secret_name': secret_config['secret_name']
+ }
+ filename = secret_config.get('filename')
+ if filename:
+ secret_args['filename'] = filename
+ uid = secret_config.get('uid')
+ if uid:
+ secret_args['uid'] = uid
+ gid = secret_config.get('gid')
+ if gid:
+ secret_args['gid'] = gid
+ mode = secret_config.get('mode')
+ if mode:
+ secret_args['mode'] = mode
+
+ secrets.append(types.SecretReference(**secret_args))
+
+ dns_config_args = {}
+ if self.dns is not None:
+ dns_config_args['nameservers'] = self.dns
+ if self.dns_search is not None:
+ dns_config_args['search'] = self.dns_search
+ if self.dns_options is not None:
+ dns_config_args['options'] = self.dns_options
+ dns_config = types.DNSConfig(**dns_config_args) if dns_config_args else None
+
+ container_spec_args = {}
+ if self.command is not None:
+ container_spec_args['command'] = self.command
+ if self.args is not None:
+ container_spec_args['args'] = self.args
+ if self.env is not None:
+ container_spec_args['env'] = self.env
+ if self.user is not None:
+ container_spec_args['user'] = self.user
+ if self.container_labels is not None:
+ container_spec_args['labels'] = self.container_labels
+ if self.healthcheck is not None:
+ container_spec_args['healthcheck'] = types.Healthcheck(**self.healthcheck)
+ elif self.healthcheck_disabled:
+ container_spec_args['healthcheck'] = types.Healthcheck(test=['NONE'])
+ if self.hostname is not None:
+ container_spec_args['hostname'] = self.hostname
+ if self.hosts is not None:
+ container_spec_args['hosts'] = self.hosts
+ if self.read_only is not None:
+ container_spec_args['read_only'] = self.read_only
+ if self.stop_grace_period is not None:
+ container_spec_args['stop_grace_period'] = self.stop_grace_period
+ if self.stop_signal is not None:
+ container_spec_args['stop_signal'] = self.stop_signal
+ if self.tty is not None:
+ container_spec_args['tty'] = self.tty
+ if self.groups is not None:
+ container_spec_args['groups'] = self.groups
+ if self.working_dir is not None:
+ container_spec_args['workdir'] = self.working_dir
+ if secrets is not None:
+ container_spec_args['secrets'] = secrets
+ if mounts is not None:
+ container_spec_args['mounts'] = mounts
+ if dns_config is not None:
+ container_spec_args['dns_config'] = dns_config
+ if configs is not None:
+ container_spec_args['configs'] = configs
+ if self.init is not None:
+ container_spec_args['init'] = self.init
+
+ return types.ContainerSpec(self.image, **container_spec_args)
+
+ def build_placement(self):
+ placement_args = {}
+ if self.constraints is not None:
+ placement_args['constraints'] = self.constraints
+ if self.placement_preferences is not None:
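+ # e.g. a preference {'spread': 'node.labels.az'} is rendered as
+ # {'Spread': {'SpreadDescriptor': 'node.labels.az'}} for the API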
+ placement_args['preferences'] = [
+ {key.title(): {'SpreadDescriptor': value}}
+ for preference in self.placement_preferences
+ for key, value in preference.items()
+ ]
+ return types.Placement(**placement_args) if placement_args else None
+
+ def build_update_config(self):
+ update_config_args = {}
+ if self.update_parallelism is not None:
+ update_config_args['parallelism'] = self.update_parallelism
+ if self.update_delay is not None:
+ update_config_args['delay'] = self.update_delay
+ if self.update_failure_action is not None:
+ update_config_args['failure_action'] = self.update_failure_action
+ if self.update_monitor is not None:
+ update_config_args['monitor'] = self.update_monitor
+ if self.update_max_failure_ratio is not None:
+ update_config_args['max_failure_ratio'] = self.update_max_failure_ratio
+ if self.update_order is not None:
+ update_config_args['order'] = self.update_order
+ return types.UpdateConfig(**update_config_args) if update_config_args else None
+
+ def build_log_driver(self):
+ log_driver_args = {}
+ if self.log_driver is not None:
+ log_driver_args['name'] = self.log_driver
+ if self.log_driver_options is not None:
+ log_driver_args['options'] = self.log_driver_options
+ return types.DriverConfig(**log_driver_args) if log_driver_args else None
+
+ def build_restart_policy(self):
+ restart_policy_args = {}
+ if self.restart_policy is not None:
+ restart_policy_args['condition'] = self.restart_policy
+ if self.restart_policy_delay is not None:
+ restart_policy_args['delay'] = self.restart_policy_delay
+ if self.restart_policy_attempts is not None:
+ restart_policy_args['max_attempts'] = self.restart_policy_attempts
+ if self.restart_policy_window is not None:
+ restart_policy_args['window'] = self.restart_policy_window
+ return types.RestartPolicy(**restart_policy_args) if restart_policy_args else None
+
+ def build_rollback_config(self):
+ if self.rollback_config is None:
+ return None
+ rollback_config_options = [
+ 'parallelism',
+ 'delay',
+ 'failure_action',
+ 'monitor',
+ 'max_failure_ratio',
+ 'order',
+ ]
+ rollback_config_args = {}
+ for option in rollback_config_options:
+ value = self.rollback_config.get(option)
+ if value is not None:
+ rollback_config_args[option] = value
+ return types.RollbackConfig(**rollback_config_args) if rollback_config_args else None
+
+ def build_resources(self):
+ resources_args = {}
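+ # Docker expresses CPU limits and reservations in NanoCPUs, i.e. 1 CPU == 10**9 units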
+ if self.limit_cpu is not None:
+ resources_args['cpu_limit'] = int(self.limit_cpu * 1000000000.0)
+ if self.limit_memory is not None:
+ resources_args['mem_limit'] = self.limit_memory
+ if self.reserve_cpu is not None:
+ resources_args['cpu_reservation'] = int(self.reserve_cpu * 1000000000.0)
+ if self.reserve_memory is not None:
+ resources_args['mem_reservation'] = self.reserve_memory
+ return types.Resources(**resources_args) if resources_args else None
+
+ def build_task_template(self, container_spec, placement=None):
+ log_driver = self.build_log_driver()
+ restart_policy = self.build_restart_policy()
+ resources = self.build_resources()
+
+ task_template_args = {}
+ if placement is not None:
+ task_template_args['placement'] = placement
+ if log_driver is not None:
+ task_template_args['log_driver'] = log_driver
+ if restart_policy is not None:
+ task_template_args['restart_policy'] = restart_policy
+ if resources is not None:
+ task_template_args['resources'] = resources
+ if self.force_update:
+ task_template_args['force_update'] = self.force_update
+ if self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ task_template_args['networks'] = networks
+ return types.TaskTemplate(container_spec=container_spec, **task_template_args)
+
+ def build_service_mode(self):
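+ # A 'global' service schedules one task per node, so a replica count does not apply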
+ if self.mode == 'global':
+ self.replicas = None
+ return types.ServiceMode(self.mode, replicas=self.replicas)
+
+ def build_networks(self):
+ networks = None
+ if self.networks is not None:
+ networks = []
+ for network in self.networks:
+ docker_network = {'Target': network['id']}
+ if 'aliases' in network:
+ docker_network['Aliases'] = network['aliases']
+ if 'options' in network:
+ docker_network['DriverOpts'] = network['options']
+ networks.append(docker_network)
+ return networks
+
+ def build_endpoint_spec(self):
+ endpoint_spec_args = {}
+ if self.publish is not None:
+ ports = []
+ for port in self.publish:
+ port_spec = {
+ 'Protocol': port['protocol'],
+ 'PublishedPort': port['published_port'],
+ 'TargetPort': port['target_port']
+ }
+ if port.get('mode'):
+ port_spec['PublishMode'] = port['mode']
+ ports.append(port_spec)
+ endpoint_spec_args['ports'] = ports
+ if self.endpoint_mode is not None:
+ endpoint_spec_args['mode'] = self.endpoint_mode
+ return types.EndpointSpec(**endpoint_spec_args) if endpoint_spec_args else None
+
+ def build_docker_service(self):
+ container_spec = self.build_container_spec()
+ placement = self.build_placement()
+ task_template = self.build_task_template(container_spec, placement)
+
+ update_config = self.build_update_config()
+ rollback_config = self.build_rollback_config()
+ service_mode = self.build_service_mode()
+ endpoint_spec = self.build_endpoint_spec()
+
+ service = {'task_template': task_template, 'mode': service_mode}
+ if update_config:
+ service['update_config'] = update_config
+ if rollback_config:
+ service['rollback_config'] = rollback_config
+ if endpoint_spec:
+ service['endpoint_spec'] = endpoint_spec
+ if self.labels:
+ service['labels'] = self.labels
+ if not self.can_use_task_template_networks:
+ networks = self.build_networks()
+ if networks:
+ service['networks'] = networks
+ return service
+
+
+class DockerServiceManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.retries = 2
+ self.diff_tracker = None
+
+ def get_service(self, name):
+ try:
+ raw_data = self.client.inspect_service(name)
+ except NotFound:
+ return None
+ ds = DockerService(self.client.docker_api_version, self.client.docker_py_version)
+
+ task_template_data = raw_data['Spec']['TaskTemplate']
+ ds.image = task_template_data['ContainerSpec']['Image']
+ ds.user = task_template_data['ContainerSpec'].get('User')
+ ds.env = task_template_data['ContainerSpec'].get('Env')
+ ds.command = task_template_data['ContainerSpec'].get('Command')
+ ds.args = task_template_data['ContainerSpec'].get('Args')
+ ds.groups = task_template_data['ContainerSpec'].get('Groups')
+ ds.stop_grace_period = task_template_data['ContainerSpec'].get('StopGracePeriod')
+ ds.stop_signal = task_template_data['ContainerSpec'].get('StopSignal')
+ ds.working_dir = task_template_data['ContainerSpec'].get('Dir')
+ ds.read_only = task_template_data['ContainerSpec'].get('ReadOnly')
+
+ healthcheck_data = task_template_data['ContainerSpec'].get('Healthcheck')
+ if healthcheck_data:
+ options = {
+ 'Test': 'test',
+ 'Interval': 'interval',
+ 'Timeout': 'timeout',
+ 'StartPeriod': 'start_period',
+ 'Retries': 'retries'
+ }
+ healthcheck = dict(
+ (options[key], value) for key, value in healthcheck_data.items()
+ if value is not None and key in options
+ )
+ ds.healthcheck = healthcheck
+
+ update_config_data = raw_data['Spec'].get('UpdateConfig')
+ if update_config_data:
+ ds.update_delay = update_config_data.get('Delay')
+ ds.update_parallelism = update_config_data.get('Parallelism')
+ ds.update_failure_action = update_config_data.get('FailureAction')
+ ds.update_monitor = update_config_data.get('Monitor')
+ ds.update_max_failure_ratio = update_config_data.get('MaxFailureRatio')
+ ds.update_order = update_config_data.get('Order')
+
+ rollback_config_data = raw_data['Spec'].get('RollbackConfig')
+ if rollback_config_data:
+ ds.rollback_config = {
+ 'parallelism': rollback_config_data.get('Parallelism'),
+ 'delay': rollback_config_data.get('Delay'),
+ 'failure_action': rollback_config_data.get('FailureAction'),
+ 'monitor': rollback_config_data.get('Monitor'),
+ 'max_failure_ratio': rollback_config_data.get('MaxFailureRatio'),
+ 'order': rollback_config_data.get('Order'),
+ }
+
+ dns_config = task_template_data['ContainerSpec'].get('DNSConfig')
+ if dns_config:
+ ds.dns = dns_config.get('Nameservers')
+ ds.dns_search = dns_config.get('Search')
+ ds.dns_options = dns_config.get('Options')
+
+ ds.hostname = task_template_data['ContainerSpec'].get('Hostname')
+
+ hosts = task_template_data['ContainerSpec'].get('Hosts')
+ if hosts:
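+ # Entries can be reported either as 'hostname:ip' or as /etc/hosts-style
+ # 'ip hostname'; normalize both into (ip, hostname) pairs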
+ hosts = [
+ list(reversed(host.split(":", 1)))
+ if ":" in host
+ else host.split(" ", 1)
+ for host in hosts
+ ]
+ ds.hosts = dict((hostname, ip) for ip, hostname in hosts)
+ ds.tty = task_template_data['ContainerSpec'].get('TTY')
+
+ placement = task_template_data.get('Placement')
+ if placement:
+ ds.constraints = placement.get('Constraints')
+ placement_preferences = []
+ for preference in placement.get('Preferences', []):
+ placement_preferences.append(
+ dict(
+ (key.lower(), value['SpreadDescriptor'])
+ for key, value in preference.items()
+ )
+ )
+ ds.placement_preferences = placement_preferences or None
+
+ restart_policy_data = task_template_data.get('RestartPolicy')
+ if restart_policy_data:
+ ds.restart_policy = restart_policy_data.get('Condition')
+ ds.restart_policy_delay = restart_policy_data.get('Delay')
+ ds.restart_policy_attempts = restart_policy_data.get('MaxAttempts')
+ ds.restart_policy_window = restart_policy_data.get('Window')
+
+ raw_data_endpoint_spec = raw_data['Spec'].get('EndpointSpec')
+ if raw_data_endpoint_spec:
+ ds.endpoint_mode = raw_data_endpoint_spec.get('Mode')
+ raw_data_ports = raw_data_endpoint_spec.get('Ports')
+ if raw_data_ports:
+ ds.publish = []
+ for port in raw_data_ports:
+ ds.publish.append({
+ 'protocol': port['Protocol'],
+ 'mode': port.get('PublishMode', None),
+ 'published_port': int(port['PublishedPort']),
+ 'target_port': int(port['TargetPort'])
+ })
+
+ raw_data_limits = task_template_data.get('Resources', {}).get('Limits')
+ if raw_data_limits:
+ raw_cpu_limits = raw_data_limits.get('NanoCPUs')
+ if raw_cpu_limits:
+ ds.limit_cpu = float(raw_cpu_limits) / 1000000000
+
+ raw_memory_limits = raw_data_limits.get('MemoryBytes')
+ if raw_memory_limits:
+ ds.limit_memory = int(raw_memory_limits)
+
+ raw_data_reservations = task_template_data.get('Resources', {}).get('Reservations')
+ if raw_data_reservations:
+ raw_cpu_reservations = raw_data_reservations.get('NanoCPUs')
+ if raw_cpu_reservations:
+ ds.reserve_cpu = float(raw_cpu_reservations) / 1000000000
+
+ raw_memory_reservations = raw_data_reservations.get('MemoryBytes')
+ if raw_memory_reservations:
+ ds.reserve_memory = int(raw_memory_reservations)
+
+ ds.labels = raw_data['Spec'].get('Labels')
+ ds.log_driver = task_template_data.get('LogDriver', {}).get('Name')
+ ds.log_driver_options = task_template_data.get('LogDriver', {}).get('Options')
+ ds.container_labels = task_template_data['ContainerSpec'].get('Labels')
+
+ mode = raw_data['Spec']['Mode']
+ if 'Replicated' in mode.keys():
+ ds.mode = to_text('replicated')
+ ds.replicas = mode['Replicated']['Replicas']
+ elif 'Global' in mode.keys():
+ ds.mode = 'global'
+ else:
+ raise Exception('Unknown service mode: %s' % mode)
+
+ raw_data_mounts = task_template_data['ContainerSpec'].get('Mounts')
+ if raw_data_mounts:
+ ds.mounts = []
+ for mount_data in raw_data_mounts:
+ bind_options = mount_data.get('BindOptions', {})
+ volume_options = mount_data.get('VolumeOptions', {})
+ tmpfs_options = mount_data.get('TmpfsOptions', {})
+ driver_config = volume_options.get('DriverConfig', {})
+ driver_config = dict(
+ (key.lower(), value) for key, value in driver_config.items()
+ ) or None
+ ds.mounts.append({
+ 'source': mount_data.get('Source', ''),
+ 'type': mount_data['Type'],
+ 'target': mount_data['Target'],
+ 'readonly': mount_data.get('ReadOnly'),
+ 'propagation': bind_options.get('Propagation'),
+ 'no_copy': volume_options.get('NoCopy'),
+ 'labels': volume_options.get('Labels'),
+ 'driver_config': driver_config,
+ 'tmpfs_mode': tmpfs_options.get('Mode'),
+ 'tmpfs_size': tmpfs_options.get('SizeBytes'),
+ })
+
+ raw_data_configs = task_template_data['ContainerSpec'].get('Configs')
+ if raw_data_configs:
+ ds.configs = []
+ for config_data in raw_data_configs:
+ ds.configs.append({
+ 'config_id': config_data['ConfigID'],
+ 'config_name': config_data['ConfigName'],
+ 'filename': config_data['File'].get('Name'),
+ 'uid': config_data['File'].get('UID'),
+ 'gid': config_data['File'].get('GID'),
+ 'mode': config_data['File'].get('Mode')
+ })
+
+ raw_data_secrets = task_template_data['ContainerSpec'].get('Secrets')
+ if raw_data_secrets:
+ ds.secrets = []
+ for secret_data in raw_data_secrets:
+ ds.secrets.append({
+ 'secret_id': secret_data['SecretID'],
+ 'secret_name': secret_data['SecretName'],
+ 'filename': secret_data['File'].get('Name'),
+ 'uid': secret_data['File'].get('UID'),
+ 'gid': secret_data['File'].get('GID'),
+ 'mode': secret_data['File'].get('Mode')
+ })
+
+ raw_networks_data = task_template_data.get('Networks', raw_data['Spec'].get('Networks'))
+ if raw_networks_data:
+ ds.networks = []
+ for network_data in raw_networks_data:
+ network = {'id': network_data['Target']}
+ if 'Aliases' in network_data:
+ network['aliases'] = network_data['Aliases']
+ if 'DriverOpts' in network_data:
+ network['options'] = network_data['DriverOpts']
+ ds.networks.append(network)
+ ds.service_version = raw_data['Version']['Index']
+ ds.service_id = raw_data['ID']
+
+ ds.init = task_template_data['ContainerSpec'].get('Init', False)
+ return ds
+
+ def update_service(self, name, old_service, new_service):
+ service_data = new_service.build_docker_service()
+ result = self.client.update_service(
+ old_service.service_id,
+ old_service.service_version,
+ name=name,
+ **service_data
+ )
+ # Prior to Docker SDK for Python 4.0.0 no warnings were returned, so there
+ # is nothing to report (see https://github.com/docker/docker-py/pull/2272).
+ self.client.report_warnings(result, ['Warning'])
+
+ def create_service(self, name, service):
+ service_data = service.build_docker_service()
+ result = self.client.create_service(name=name, **service_data)
+ self.client.report_warnings(result, ['Warning'])
+
+ def remove_service(self, name):
+ self.client.remove_service(name)
+
+ def get_image_digest(self, name, resolve=False):
+ if not name or not resolve:
+ return name
+ repo, tag = parse_repository_tag(name)
+ if not tag:
+ tag = 'latest'
+ name = repo + ':' + tag
+ distribution_data = self.client.inspect_distribution(name)
+ digest = distribution_data['Descriptor']['digest']
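+ # e.g. 'nginx:latest' resolves to 'nginx:latest@sha256:<digest>'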
+ return '%s@%s' % (name, digest)
+
+ def get_networks_names_ids(self):
+ return dict(
+ (network['Name'], network['Id']) for network in self.client.networks()
+ )
+
+ def get_missing_secret_ids(self):
+ """
+ Resolve missing secret IDs by looking them up by name.
+ """
+ secret_names = [
+ secret['secret_name']
+ for secret in self.client.module.params.get('secrets') or []
+ if secret['secret_id'] is None
+ ]
+ if not secret_names:
+ return {}
+ secrets = self.client.secrets(filters={'name': secret_names})
+ secrets = dict(
+ (secret['Spec']['Name'], secret['ID'])
+ for secret in secrets
+ if secret['Spec']['Name'] in secret_names
+ )
+ for secret_name in secret_names:
+ if secret_name not in secrets:
+ self.client.fail(
+ 'Could not find a secret named "%s"' % secret_name
+ )
+ return secrets
+
+ def get_missing_config_ids(self):
+ """
+ Resolve missing config IDs by looking them up by name.
+ """
+ config_names = [
+ config['config_name']
+ for config in self.client.module.params.get('configs') or []
+ if config['config_id'] is None
+ ]
+ if not config_names:
+ return {}
+ configs = self.client.configs(filters={'name': config_names})
+ configs = dict(
+ (config['Spec']['Name'], config['ID'])
+ for config in configs
+ if config['Spec']['Name'] in config_names
+ )
+ for config_name in config_names:
+ if config_name not in configs:
+ self.client.fail(
+ 'Could not find a config named "%s"' % config_name
+ )
+ return configs
+
+ def run(self):
+ self.diff_tracker = DifferenceTracker()
+ module = self.client.module
+
+ image = module.params['image']
+ try:
+ image_digest = self.get_image_digest(
+ name=image,
+ resolve=module.params['resolve_image']
+ )
+ except DockerException as e:
+ self.client.fail(
+ 'Error looking for an image named %s: %s'
+ % (image, e)
+ )
+
+ try:
+ current_service = self.get_service(module.params['name'])
+ except Exception as e:
+ self.client.fail(
+ 'Error looking for service named %s: %s'
+ % (module.params['name'], e)
+ )
+ try:
+ secret_ids = self.get_missing_secret_ids()
+ config_ids = self.get_missing_config_ids()
+ network_ids = self.get_networks_names_ids()
+ new_service = DockerService.from_ansible_params(
+ module.params,
+ current_service,
+ image_digest,
+ secret_ids,
+ config_ids,
+ network_ids,
+ self.client.docker_api_version,
+ self.client.docker_py_version
+ )
+ except Exception as e:
+ return self.client.fail(
+ 'Error parsing module parameters: %s' % e
+ )
+
+ changed = False
+ msg = 'noop'
+ rebuilt = False
+ differences = DifferenceTracker()
+ facts = {}
+
+ if current_service:
+ if module.params['state'] == 'absent':
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ msg = 'Service removed'
+ changed = True
+ else:
+ changed, differences, need_rebuild, force_update = new_service.compare(
+ current_service
+ )
+ if changed:
+ self.diff_tracker.merge(differences)
+ if need_rebuild:
+ if not module.check_mode:
+ self.remove_service(module.params['name'])
+ self.create_service(
+ module.params['name'],
+ new_service
+ )
+ msg = 'Service rebuilt'
+ rebuilt = True
+ else:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service updated'
+ rebuilt = False
+ else:
+ if force_update:
+ if not module.check_mode:
+ self.update_service(
+ module.params['name'],
+ current_service,
+ new_service
+ )
+ msg = 'Service forcefully updated'
+ rebuilt = False
+ changed = True
+ else:
+ msg = 'Service unchanged'
+ facts = new_service.get_facts()
+ else:
+ if module.params['state'] == 'absent':
+ msg = 'Service absent'
+ else:
+ if not module.check_mode:
+ self.create_service(module.params['name'], new_service)
+ msg = 'Service created'
+ changed = True
+ facts = new_service.get_facts()
+
+ return msg, changed, rebuilt, differences.get_legacy_docker_diffs(), facts
+
+ def run_safe(self):
+ while True:
+ try:
+ return self.run()
+ except APIError as e:
+ # Sometimes Version.Index will have changed between an inspect and
+ # update. If this is encountered we'll retry the update.
+ if self.retries > 0 and 'update out of sequence' in str(e.explanation):
+ self.retries -= 1
+ time.sleep(1)
+ else:
+ raise
+
+
+def _detect_publish_mode_usage(client):
+ for publish_def in client.module.params['publish'] or []:
+ if publish_def.get('mode'):
+ return True
+ return False
+
+
+def _detect_healthcheck_start_period(client):
+ if client.module.params['healthcheck']:
+ return client.module.params['healthcheck']['start_period'] is not None
+ return False
+
+
+def _detect_mount_tmpfs_usage(client):
+ for mount in client.module.params['mounts'] or []:
+ if mount.get('type') == 'tmpfs':
+ return True
+ if mount.get('tmpfs_size') is not None:
+ return True
+ if mount.get('tmpfs_mode') is not None:
+ return True
+ return False
+
+
+def _detect_update_config_failure_action_rollback(client):
+ rollback_config_failure_action = (
+ (client.module.params['update_config'] or {}).get('failure_action')
+ )
+ update_failure_action = client.module.params['update_failure_action']
+ failure_action = rollback_config_failure_action or update_failure_action
+ return failure_action == 'rollback'
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ image=dict(type='str'),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ mounts=dict(type='list', elements='dict', options=dict(
+ source=dict(type='str'),
+ target=dict(type='str', required=True),
+ type=dict(
+ type='str',
+ default='bind',
+ choices=['bind', 'volume', 'tmpfs', 'npipe'],
+ ),
+ readonly=dict(type='bool'),
+ labels=dict(type='dict'),
+ propagation=dict(
+ type='str',
+ choices=[
+ 'shared',
+ 'slave',
+ 'private',
+ 'rshared',
+ 'rslave',
+ 'rprivate'
+ ]
+ ),
+ no_copy=dict(type='bool'),
+ driver_config=dict(type='dict', options=dict(
+ name=dict(type='str'),
+ options=dict(type='dict')
+ )),
+ tmpfs_size=dict(type='str'),
+ tmpfs_mode=dict(type='int')
+ )),
+ configs=dict(type='list', elements='dict', options=dict(
+ config_id=dict(type='str'),
+ config_name=dict(type='str', required=True),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ secrets=dict(type='list', elements='dict', options=dict(
+ secret_id=dict(type='str'),
+ secret_name=dict(type='str', required=True),
+ filename=dict(type='str'),
+ uid=dict(type='str'),
+ gid=dict(type='str'),
+ mode=dict(type='int'),
+ )),
+ networks=dict(type='list', elements='raw'),
+ command=dict(type='raw'),
+ args=dict(type='list', elements='str'),
+ env=dict(type='raw'),
+ env_files=dict(type='list', elements='path'),
+ force_update=dict(type='bool', default=False),
+ groups=dict(type='list', elements='str'),
+ logging=dict(type='dict', options=dict(
+ driver=dict(type='str'),
+ options=dict(type='dict'),
+ )),
+ publish=dict(type='list', elements='dict', options=dict(
+ published_port=dict(type='int', required=True),
+ target_port=dict(type='int', required=True),
+ protocol=dict(type='str', default='tcp', choices=['tcp', 'udp']),
+ mode=dict(type='str', choices=['ingress', 'host']),
+ )),
+ placement=dict(type='dict', options=dict(
+ constraints=dict(type='list', elements='str'),
+ preferences=dict(type='list', elements='dict'),
+ )),
+ tty=dict(type='bool'),
+ dns=dict(type='list', elements='str'),
+ dns_search=dict(type='list', elements='str'),
+ dns_options=dict(type='list', elements='str'),
+ healthcheck=dict(type='dict', options=dict(
+ test=dict(type='raw'),
+ interval=dict(type='str'),
+ timeout=dict(type='str'),
+ start_period=dict(type='str'),
+ retries=dict(type='int'),
+ )),
+ hostname=dict(type='str'),
+ hosts=dict(type='dict'),
+ labels=dict(type='dict'),
+ container_labels=dict(type='dict'),
+ mode=dict(
+ type='str',
+ default='replicated',
+ choices=['replicated', 'global']
+ ),
+ replicas=dict(type='int', default=-1),
+ endpoint_mode=dict(type='str', choices=['vip', 'dnsrr']),
+ stop_grace_period=dict(type='str'),
+ stop_signal=dict(type='str'),
+ limits=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ read_only=dict(type='bool'),
+ reservations=dict(type='dict', options=dict(
+ cpus=dict(type='float'),
+ memory=dict(type='str'),
+ )),
+ resolve_image=dict(type='bool', default=False),
+ restart_config=dict(type='dict', options=dict(
+ condition=dict(type='str', choices=['none', 'on-failure', 'any']),
+ delay=dict(type='str'),
+ max_attempts=dict(type='int'),
+ window=dict(type='str'),
+ )),
+ rollback_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ update_config=dict(type='dict', options=dict(
+ parallelism=dict(type='int'),
+ delay=dict(type='str'),
+ failure_action=dict(
+ type='str',
+ choices=['continue', 'pause', 'rollback']
+ ),
+ monitor=dict(type='str'),
+ max_failure_ratio=dict(type='float'),
+ order=dict(type='str'),
+ )),
+ user=dict(type='str'),
+ working_dir=dict(type='str'),
+ init=dict(type='bool'),
+ )
+
+ option_minimal_versions = dict(
+ dns=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ dns_options=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ dns_search=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ endpoint_mode=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
+ force_update=dict(docker_py_version='2.1.0', docker_api_version='1.25'),
+ healthcheck=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ hostname=dict(docker_py_version='2.2.0', docker_api_version='1.25'),
+ hosts=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ groups=dict(docker_py_version='2.6.0', docker_api_version='1.25'),
+ tty=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ secrets=dict(docker_py_version='2.4.0', docker_api_version='1.25'),
+ configs=dict(docker_py_version='2.6.0', docker_api_version='1.30'),
+ stop_signal=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ publish=dict(docker_py_version='3.0.0', docker_api_version='1.25'),
+ read_only=dict(docker_py_version='2.6.0', docker_api_version='1.28'),
+ resolve_image=dict(docker_api_version='1.30', docker_py_version='3.2.0'),
+ rollback_config=dict(docker_py_version='3.5.0', docker_api_version='1.28'),
+ init=dict(docker_py_version='4.0.0', docker_api_version='1.37'),
+ # specials
+ publish_mode=dict(
+ docker_py_version='3.0.0',
+ docker_api_version='1.25',
+ detect_usage=_detect_publish_mode_usage,
+ usage_msg='set publish.mode'
+ ),
+ healthcheck_start_period=dict(
+ docker_py_version='2.6.0',
+ docker_api_version='1.29',
+ detect_usage=_detect_healthcheck_start_period,
+ usage_msg='set healthcheck.start_period'
+ ),
+ update_config_max_failure_ratio=dict(
+ docker_py_version='2.1.0',
+ docker_api_version='1.25',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'max_failure_ratio'
+ ) is not None,
+ usage_msg='set update_config.max_failure_ratio'
+ ),
+ update_config_failure_action=dict(
+ docker_py_version='3.5.0',
+ docker_api_version='1.28',
+ detect_usage=_detect_update_config_failure_action_rollback,
+ usage_msg='set update_config.failure_action.rollback'
+ ),
+ update_config_monitor=dict(
+ docker_py_version='2.1.0',
+ docker_api_version='1.25',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'monitor'
+ ) is not None,
+ usage_msg='set update_config.monitor'
+ ),
+ update_config_order=dict(
+ docker_py_version='2.7.0',
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['update_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set update_config.order'
+ ),
+ placement_config_preferences=dict(
+ docker_py_version='2.4.0',
+ docker_api_version='1.27',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'preferences'
+ ) is not None,
+ usage_msg='set placement.preferences'
+ ),
+ placement_config_constraints=dict(
+ docker_py_version='2.4.0',
+ detect_usage=lambda c: (c.module.params['placement'] or {}).get(
+ 'constraints'
+ ) is not None,
+ usage_msg='set placement.constraints'
+ ),
+ mounts_tmpfs=dict(
+ docker_py_version='2.6.0',
+ detect_usage=_detect_mount_tmpfs_usage,
+ usage_msg='set mounts.tmpfs'
+ ),
+ rollback_config_order=dict(
+ docker_api_version='1.29',
+ detect_usage=lambda c: (c.module.params['rollback_config'] or {}).get(
+ 'order'
+ ) is not None,
+ usage_msg='set rollback_config.order'
+ ),
+ )
+ required_if = [
+ ('state', 'present', ['image'])
+ ]
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ required_if=required_if,
+ supports_check_mode=True,
+ min_docker_version='2.0.2',
+ min_docker_api_version='1.24',
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ dsm = DockerServiceManager(client)
+ msg, changed, rebuilt, changes, facts = dsm.run_safe()
+
+ results = dict(
+ msg=msg,
+ changed=changed,
+ rebuilt=rebuilt,
+ changes=changes,
+ swarm_service=facts,
+ )
+ if client.module._diff:
+ before, after = dsm.diff_tracker.get_before_after()
+ results['diff'] = dict(before=before, after=after)
+
+ client.module.exit_json(**results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py
new file mode 100644
index 00000000..b1b25caa
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py
@@ -0,0 +1,115 @@
+#!/usr/bin/python
+#
+# (c) 2019 Hannes Ljungberg <hannes.ljungberg@gmail.com>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+---
+module: docker_swarm_service_info
+
+short_description: Retrieves information about docker services from a Swarm Manager
+
+description:
+ - Retrieves information about a docker service.
+ - Essentially returns the output of C(docker service inspect <name>).
+ - Must be executed on a host running as a Swarm Manager node, otherwise the module will fail.
+
+
+options:
+ name:
+ description:
+ - The name of the service to inspect.
+ type: str
+ required: yes
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+ - Hannes Ljungberg (@hannseman)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 2.0.0"
+ - "Docker API >= 1.24"
+'''
+
+EXAMPLES = '''
+- name: Get info from a service
+ community.docker.docker_swarm_service_info:
+ name: myservice
+ register: result
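+
+# A follow-up usage sketch (the service name above is illustrative): act on
+# the registered result via the return values documented below.
+- name: Report whether the service exists
+ ansible.builtin.debug:
+ msg: "myservice {{ 'exists' if result.exists else 'does not exist' }}"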
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the service exists.
+ type: bool
+ returned: always
+ sample: true
+service:
+ description:
+ - A dictionary representing the current state of the service. Matches the C(docker service inspect) output.
+ - Will be C(none) if service does not exist.
+ returned: always
+ type: dict
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException
+except ImportError:
+ # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ RequestException,
+)
+
+from ansible_collections.community.docker.plugins.module_utils.swarm import AnsibleDockerSwarmClient
+
+
+def get_service_info(client):
+ service = client.module.params['name']
+ return client.get_service_inspect(
+ service_id=service,
+ skip_missing=True
+ )
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True),
+ )
+
+ client = AnsibleDockerSwarmClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='2.0.0',
+ min_docker_api_version='1.24',
+ )
+
+ client.fail_task_if_not_swarm_manager()
+
+ try:
+ service = get_service_info(client)
+
+ client.module.exit_json(
+ changed=False,
+ service=service,
+ exists=bool(service)
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume.py
new file mode 100644
index 00000000..e504a591
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume.py
@@ -0,0 +1,308 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume
+short_description: Manage Docker volumes
+description:
+ - Create/remove Docker volumes.
+ - Performs largely the same function as the "docker volume" CLI subcommand.
+options:
+ volume_name:
+ description:
+ - Name of the volume to operate on.
+ type: str
+ required: yes
+ aliases:
+ - name
+
+ driver:
+ description:
+ - Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used.
+ type: str
+ default: local
+
+ driver_options:
+ description:
+ - "Dictionary of volume settings. Consult docker docs for valid options and values:
+ U(https://docs.docker.com/engine/reference/commandline/volume_create/#driver-specific-options)"
+ type: dict
+
+ labels:
+ description:
+ - Dictionary of label key/values to set for the volume.
+ type: dict
+
+ recreate:
+ description:
+ - Controls when a volume will be recreated when I(state) is C(present). Please
+ note that recreating an existing volume will cause **any data in the existing volume
+ to be lost!** The volume will be deleted and a new volume with the same name will be
+ created.
+ - The value C(always) forces the volume to be always recreated.
+ - The value C(never) makes sure the volume will not be recreated.
+ - The value C(options-changed) makes sure the volume will be recreated if the volume
+ already exists and the driver, driver options or labels differ.
+ type: str
+ default: never
+ choices:
+ - always
+ - never
+ - options-changed
+
+ state:
+ description:
+ - C(absent) deletes the volume.
+ - C(present) creates the volume, if it does not already exist.
+ type: str
+ default: present
+ choices:
+ - absent
+ - present
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+ - Alex Grönholm (@agronholm)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "The docker server >= 1.9.0"
+'''
+
+EXAMPLES = '''
+- name: Create a volume
+ community.docker.docker_volume:
+ name: volume_one
+
+- name: Remove a volume
+ community.docker.docker_volume:
+ name: volume_one
+ state: absent
+
+- name: Create a volume with options
+ community.docker.docker_volume:
+ name: volume_two
+ driver_options:
+ type: btrfs
+ device: /dev/sda2
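+
+# A sketch combining documented options (name and label values are
+# illustrative); note that recreating a volume destroys its data.
+- name: Create a labelled volume, recreated when its configuration changes
+ community.docker.docker_volume:
+ name: volume_three
+ labels:
+ environment: test
+ recreate: options-changed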
+'''
+
+RETURN = '''
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+ returned: success
+ type: dict
+ sample: {}
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, APIError
+except ImportError:
+ # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ DockerBaseClass,
+ AnsibleDockerClient,
+ DifferenceTracker,
+ RequestException,
+)
+from ansible.module_utils.six import iteritems, text_type
+
+
+class TaskParameters(DockerBaseClass):
+ def __init__(self, client):
+ super(TaskParameters, self).__init__()
+ self.client = client
+
+ self.volume_name = None
+ self.driver = None
+ self.driver_options = None
+ self.labels = None
+ self.recreate = None
+ self.debug = None
+
+ for key, value in iteritems(client.module.params):
+ setattr(self, key, value)
+
+
+class DockerVolumeManager(object):
+
+ def __init__(self, client):
+ self.client = client
+ self.parameters = TaskParameters(client)
+ self.check_mode = self.client.check_mode
+ self.results = {
+ u'changed': False,
+ u'actions': []
+ }
+ self.diff = self.client.module._diff
+ self.diff_tracker = DifferenceTracker()
+ self.diff_result = dict()
+
+ self.existing_volume = self.get_existing_volume()
+
+ state = self.parameters.state
+ if state == 'present':
+ self.present()
+ elif state == 'absent':
+ self.absent()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ if self.diff:
+ self.diff_result['before'], self.diff_result['after'] = self.diff_tracker.get_before_after()
+ self.results['diff'] = self.diff_result
+
+ def get_existing_volume(self):
+ try:
+ volumes = self.client.volumes()
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ if volumes[u'Volumes'] is None:
+ return None
+
+ for volume in volumes[u'Volumes']:
+ if volume['Name'] == self.parameters.volume_name:
+ return volume
+
+ return None
+
+ def has_different_config(self):
+ """
+ Return the list of differences between the current parameters and the existing volume.
+
+ :return: list of options that differ
+ """
+ differences = DifferenceTracker()
+ if self.parameters.driver and self.parameters.driver != self.existing_volume['Driver']:
+ differences.add('driver', parameter=self.parameters.driver, active=self.existing_volume['Driver'])
+ if self.parameters.driver_options:
+ if not self.existing_volume.get('Options'):
+ differences.add('driver_options',
+ parameter=self.parameters.driver_options,
+ active=self.existing_volume.get('Options'))
+ else:
+ for key, value in iteritems(self.parameters.driver_options):
+ if (not self.existing_volume['Options'].get(key) or
+ value != self.existing_volume['Options'][key]):
+ differences.add('driver_options.%s' % key,
+ parameter=value,
+ active=self.existing_volume['Options'].get(key))
+ if self.parameters.labels:
+ existing_labels = self.existing_volume.get('Labels', {}) or {}
+ for label in self.parameters.labels:
+ if existing_labels.get(label) != self.parameters.labels.get(label):
+ differences.add('labels.%s' % label,
+ parameter=self.parameters.labels.get(label),
+ active=existing_labels.get(label))
+
+ return differences
+
+ def create_volume(self):
+ if not self.existing_volume:
+ if not self.check_mode:
+ try:
+ params = dict(
+ driver=self.parameters.driver,
+ driver_opts=self.parameters.driver_options,
+ )
+
+ if self.parameters.labels is not None:
+ params['labels'] = self.parameters.labels
+
+ resp = self.client.create_volume(self.parameters.volume_name, **params)
+ self.existing_volume = self.client.inspect_volume(resp['Name'])
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ self.results['actions'].append("Created volume %s with driver %s" % (self.parameters.volume_name, self.parameters.driver))
+ self.results['changed'] = True
+
+ def remove_volume(self):
+ if self.existing_volume:
+ if not self.check_mode:
+ try:
+ self.client.remove_volume(self.parameters.volume_name)
+ except APIError as e:
+ self.client.fail(text_type(e))
+
+ self.results['actions'].append("Removed volume %s" % self.parameters.volume_name)
+ self.results['changed'] = True
+
+ def present(self):
+ differences = DifferenceTracker()
+ if self.existing_volume:
+ differences = self.has_different_config()
+
+ self.diff_tracker.add('exists', parameter=True, active=self.existing_volume is not None)
+ if (not differences.empty and self.parameters.recreate == 'options-changed') or self.parameters.recreate == 'always':
+ self.remove_volume()
+ self.existing_volume = None
+
+ self.create_volume()
+
+ if self.diff or self.check_mode or self.parameters.debug:
+ self.diff_result['differences'] = differences.get_legacy_docker_diffs()
+ self.diff_tracker.merge(differences)
+
+ if not self.check_mode and not self.parameters.debug:
+ self.results.pop('actions')
+
+ volume_facts = self.get_existing_volume()
+ self.results['volume'] = volume_facts
+
+ def absent(self):
+ self.diff_tracker.add('exists', parameter=False, active=self.existing_volume is not None)
+ self.remove_volume()
+
+
+def main():
+ argument_spec = dict(
+ volume_name=dict(type='str', required=True, aliases=['name']),
+ state=dict(type='str', default='present', choices=['present', 'absent']),
+ driver=dict(type='str', default='local'),
+ driver_options=dict(type='dict', default={}),
+ labels=dict(type='dict'),
+ recreate=dict(type='str', default='never', choices=['always', 'never', 'options-changed']),
+ debug=dict(type='bool', default=False)
+ )
+
+ option_minimal_versions = dict(
+ labels=dict(docker_py_version='1.10.0', docker_api_version='1.23'),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.10.0',
+ min_docker_api_version='1.21',
+ # "The docker server >= 1.9.0"
+ option_minimal_versions=option_minimal_versions,
+ )
+
+ try:
+ cm = DockerVolumeManager(client)
+ client.module.exit_json(**cm.results)
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume_info.py b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume_info.py
new file mode 100644
index 00000000..8e1734c2
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/modules/docker_volume_info.py
@@ -0,0 +1,128 @@
+#!/usr/bin/python
+# coding: utf-8
+#
+# Copyright 2017 Red Hat | Ansible, Alex Grönholm <alex.gronholm@nextday.fi>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+
+DOCUMENTATION = '''
+module: docker_volume_info
+short_description: Retrieve facts about Docker volumes
+description:
+ - Performs largely the same function as the "docker volume inspect" CLI subcommand.
+options:
+ name:
+ description:
+ - Name of the volume to inspect.
+ type: str
+ required: yes
+ aliases:
+ - volume_name
+
+extends_documentation_fragment:
+- community.docker.docker
+- community.docker.docker.docker_py_1_documentation
+
+
+author:
+ - Felix Fontein (@felixfontein)
+
+requirements:
+ - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0 (use L(docker-py,https://pypi.org/project/docker-py/) for Python 2.6)"
+ - "Docker API >= 1.21"
+'''
+
+EXAMPLES = '''
+- name: Get info on volume
+ community.docker.docker_volume_info:
+ name: mydata
+ register: result
+
+- name: Does volume exist?
+ ansible.builtin.debug:
+ msg: "The volume {{ 'exists' if result.exists else 'does not exist' }}"
+
+- name: Print information about volume
+ ansible.builtin.debug:
+ var: result.volume
+ when: result.exists
+'''
+
+RETURN = '''
+exists:
+ description:
+ - Returns whether the volume exists.
+ type: bool
+ returned: always
+ sample: true
+volume:
+ description:
+ - Volume inspection results for the affected volume.
+ - Will be C(none) if volume does not exist.
+ returned: success
+ type: dict
+ sample: '{
+ "CreatedAt": "2018-12-09T17:43:44+01:00",
+ "Driver": "local",
+ "Labels": null,
+ "Mountpoint": "/var/lib/docker/volumes/ansible-test-bd3f6172/_data",
+ "Name": "ansible-test-bd3f6172",
+ "Options": {},
+ "Scope": "local"
+ }'
+'''
+
+import traceback
+
+try:
+ from docker.errors import DockerException, NotFound
+except ImportError:
+ # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
+ pass
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClient,
+ RequestException,
+)
+
+
+def get_existing_volume(client, volume_name):
+ try:
+ return client.inspect_volume(volume_name)
+ except NotFound as dummy:
+ return None
+ except Exception as exc:
+ client.fail("Error inspecting volume: %s" % exc)
+
+
+def main():
+ argument_spec = dict(
+ name=dict(type='str', required=True, aliases=['volume_name']),
+ )
+
+ client = AnsibleDockerClient(
+ argument_spec=argument_spec,
+ supports_check_mode=True,
+ min_docker_version='1.8.0',
+ min_docker_api_version='1.21',
+ )
+
+ try:
+ volume = get_existing_volume(client, client.module.params['name'])
+
+ client.module.exit_json(
+ changed=False,
+ exists=bool(volume),
+ volume=volume,
+ )
+ except DockerException as e:
+ client.fail('An unexpected docker error occurred: {0}'.format(e), exception=traceback.format_exc())
+ except RequestException as e:
+ client.fail('An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'.format(e), exception=traceback.format_exc())
+
+
+if __name__ == '__main__':
+ main()
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/common.py b/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/common.py
new file mode 100644
index 00000000..4742367e
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/common.py
@@ -0,0 +1,33 @@
+# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+from ansible.errors import AnsibleConnectionFailure
+
+
+from ansible_collections.community.docker.plugins.module_utils.common import (
+ AnsibleDockerClientBase,
+ DOCKER_COMMON_ARGS,
+)
+
+
+class AnsibleDockerClient(AnsibleDockerClientBase):
+ def __init__(self, plugin, min_docker_version=None, min_docker_api_version=None):
+ self.plugin = plugin
+ super(AnsibleDockerClient, self).__init__(
+ min_docker_version=min_docker_version,
+ min_docker_api_version=min_docker_api_version)
+
+ def fail(self, msg, **kwargs):
+ if kwargs:
+ msg += '\nContext:\n' + '\n'.join(' {0} = {1!r}'.format(k, v) for (k, v) in kwargs.items())
+ raise AnsibleConnectionFailure(msg)
+
+ def _get_params(self):
+ return dict([
+ (option, self.plugin.get_option(option))
+ for option in DOCKER_COMMON_ARGS
+ ])
diff --git a/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py b/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py
new file mode 100644
index 00000000..bbaa1565
--- /dev/null
+++ b/collections-debian-merged/ansible_collections/community/docker/plugins/plugin_utils/socket_handler.py
@@ -0,0 +1,221 @@
+# Copyright (c) 2019-2020, Felix Fontein <felix@fontein.de>
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+import fcntl
+import os
+import os.path
+import socket as pysocket
+
+from ansible.compat import selectors
+from ansible.module_utils.six import PY3
+
+try:
+ from docker.utils import socket as docker_socket
+ import struct
+except Exception:
+ # missing Docker SDK for Python handled in ansible_collections.community.docker.plugins.module_utils.common
+ pass
+
+
+PARAMIKO_POLL_TIMEOUT = 0.01 # 10 milliseconds
+
+
+class DockerSocketHandler:
+ def __init__(self, display, sock, container=None):
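+ # Switch the socket to non-blocking mode, whichever flavor it is:
+ # a wrapper with an inner _sock, a plain socket, or a bare file descriptor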
+ if hasattr(sock, '_sock'):
+ sock._sock.setblocking(0)
+ elif hasattr(sock, 'setblocking'):
+ sock.setblocking(0)
+ else:
+ fcntl.fcntl(sock.fileno(), fcntl.F_SETFL, fcntl.fcntl(sock.fileno(), fcntl.F_GETFL) | os.O_NONBLOCK)
+
+ self._display = display
+ self._paramiko_read_workaround = hasattr(sock, 'send_ready') and 'paramiko' in str(type(sock))
+
+ self._container = container
+
+ self._sock = sock
+ self._block_done_callback = None
+ self._block_buffer = []
+ self._eof = False
+ self._read_buffer = b''
+ self._write_buffer = b''
+ self._end_of_writing = False
+
+ self._current_stream = None
+ self._current_missing = 0
+ self._current_buffer = b''
+
+ self._selector = selectors.DefaultSelector()
+ self._selector.register(self._sock, selectors.EVENT_READ)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, type, value, tb):
+ self._selector.close()
+
+ def set_block_done_callback(self, block_done_callback):
+ self._block_done_callback = block_done_callback
+ if self._block_done_callback is not None:
+ while self._block_buffer:
+ elt = self._block_buffer.pop(0)
+ self._block_done_callback(*elt)
+
+ def _add_block(self, stream_id, data):
+ if self._block_done_callback is not None:
+ self._block_done_callback(stream_id, data)
+ else:
+ self._block_buffer.append((stream_id, data))
+
+ def _read(self):
+ if self._eof:
+ return
+ if hasattr(self._sock, 'recv'):
+ try:
+ data = self._sock.recv(262144)
+ except Exception as e:
+ # After calling self._sock.shutdown(), OpenSSL's/urllib3's
+ # WrappedSocket seems to eventually raise ZeroReturnError in
+ # case of EOF
+ if 'OpenSSL.SSL.ZeroReturnError' in str(type(e)):
+ self._eof = True
+ return
+ else:
+ raise
+ elif PY3 and isinstance(self._sock, getattr(pysocket, 'SocketIO')):
+ data = self._sock.read()
+ else:
+ data = os.read(self._sock.fileno(), 262144)
+ if data is None:
+ # no data available
+ return
+ self._display.vvvv('read {0} bytes'.format(len(data)), host=self._container)
+ if len(data) == 0:
+ # Stream EOF
+ self._eof = True
+ return
+ self._read_buffer += data
+ while len(self._read_buffer) > 0:
+ if self._current_missing > 0:
+ n = min(len(self._read_buffer), self._current_missing)
+ self._current_buffer += self._read_buffer[:n]
+ self._read_buffer = self._read_buffer[n:]
+ self._current_missing -= n
+ if self._current_missing == 0:
+ self._add_block(self._current_stream, self._current_buffer)
+ self._current_buffer = b''
+ if len(self._read_buffer) < 8:
+ break
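+ # Each frame of Docker's multiplexed stream starts with an 8 byte header:
+ # stream id (1 byte), 3 padding bytes, then a big-endian uint32 payload length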
+ self._current_stream, self._current_missing = struct.unpack('>BxxxL', self._read_buffer[:8])
+ self._read_buffer = self._read_buffer[8:]
+ if self._current_missing < 0:
+ # Stream EOF (as reported by docker daemon)
+ self._eof = True
+ break
+
+ def _handle_end_of_writing(self):
+ if self._end_of_writing and len(self._write_buffer) == 0:
+ self._end_of_writing = False
+ self._display.vvvv('Shutting socket down for writing', host=self._container)
+ if hasattr(self._sock, 'shutdown_write'):
+ self._sock.shutdown_write()
+ elif hasattr(self._sock, 'shutdown'):
+ try:
+ self._sock.shutdown(pysocket.SHUT_WR)
+ except TypeError as e:
+ # probably: "TypeError: shutdown() takes 1 positional argument but 2 were given"
+ self._display.vvvv('Shutting down for writing not possible; trying shutdown instead: {0}'.format(e), host=self._container)
+ self._sock.shutdown()
+ elif PY3 and isinstance(self._sock, getattr(pysocket, 'SocketIO')):
+ self._sock._sock.shutdown(pysocket.SHUT_WR)
+ else:
+ self._display.vvvv('No idea how to signal end of writing', host=self._container)
+
+ def _write(self):
+ if len(self._write_buffer) > 0:
+ if hasattr(self._sock, '_send_until_done'):
+ # WrappedSocket (urllib3/contrib/pyopenssl) doesn't have `send`, but
+ # only `sendall`, which uses `_send_until_done` under the hood.
+ written = self._sock._send_until_done(self._write_buffer)
+ elif hasattr(self._sock, 'send'):
+ written = self._sock.send(self._write_buffer)
+ else:
+ written = os.write(self._sock.fileno(), self._write_buffer)
+ self._write_buffer = self._write_buffer[written:]
+ self._display.vvvv('wrote {0} bytes, {1} are left'.format(written, len(self._write_buffer)), host=self._container)
+ if len(self._write_buffer) > 0:
+ self._selector.modify(self._sock, selectors.EVENT_READ | selectors.EVENT_WRITE)
+ else:
+ self._selector.modify(self._sock, selectors.EVENT_READ)
+ self._handle_end_of_writing()
+
+ def select(self, timeout=None, _internal_recursion=False):
+ if not _internal_recursion and self._paramiko_read_workaround and len(self._write_buffer) > 0:
+ # When the SSH transport is used, docker-py internally uses Paramiko, whose
+ # Channel object supports select(), but only for reading
+ # (https://github.com/paramiko/paramiko/issues/695).
+ if self._sock.send_ready():
+ self._write()
+ return True
+ while timeout is None or timeout > PARAMIKO_POLL_TIMEOUT:
+ result = self.select(PARAMIKO_POLL_TIMEOUT, _internal_recursion=True)
+ if self._sock.send_ready():
+ self._write()
+ result += 1
+ if result > 0:
+ return True
+ if timeout is not None:
+ timeout -= PARAMIKO_POLL_TIMEOUT
+ self._display.vvvv('select... ({0})'.format(timeout), host=self._container)
+ events = self._selector.select(timeout)
+ for key, event in events:
+ if key.fileobj == self._sock:
+ self._display.vvvv(
+ 'select event read:{0} write:{1}'.format(event & selectors.EVENT_READ != 0, event & selectors.EVENT_WRITE != 0),
+ host=self._container)
+ if event & selectors.EVENT_READ != 0:
+ self._read()
+ if event & selectors.EVENT_WRITE != 0:
+ self._write()
+ result = len(events)
+ if self._paramiko_read_workaround and len(self._write_buffer) > 0:
+ if self._sock.send_ready():
+ self._write()
+ result += 1
+ return result > 0
+
+ def is_eof(self):
+ return self._eof
+
+ def end_of_writing(self):
+ self._end_of_writing = True
+ self._handle_end_of_writing()
+
+ def consume(self):
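+ # Drain the socket until EOF, collecting frames into stdout/stderr by stream id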
+ stdout = []
+ stderr = []
+
+ def append_block(stream_id, data):
+ if stream_id == docker_socket.STDOUT:
+ stdout.append(data)
+ elif stream_id == docker_socket.STDERR:
+ stderr.append(data)
+ else:
+ raise ValueError('{0} is not a valid stream ID'.format(stream_id))
+
+ self.end_of_writing()
+
+ self.set_block_done_callback(append_block)
+ while not self._eof:
+ self.select()
+ return b''.join(stdout), b''.join(stderr)
+
+ def write(self, data):
+ self._write_buffer += data
+ if len(self._write_buffer) == len(data):
+ self._write()