diff options
Diffstat (limited to '')
62 files changed, 5294 insertions, 940 deletions
diff --git a/ansible_collections/community/docker/plugins/action/docker_container_copy_into.py b/ansible_collections/community/docker/plugins/action/docker_container_copy_into.py index 372cbd0a3..fed0af00b 100644 --- a/ansible_collections/community/docker/plugins/action/docker_container_copy_into.py +++ b/ansible_collections/community/docker/plugins/action/docker_container_copy_into.py @@ -15,7 +15,7 @@ from ansible_collections.community.docker.plugins.module_utils._scramble import class ActionModule(ActionBase): - # Set to True when transfering files to the remote + # Set to True when transferring files to the remote TRANSFERS_FILES = False def run(self, tmp=None, task_vars=None): diff --git a/ansible_collections/community/docker/plugins/connection/docker.py b/ansible_collections/community/docker/plugins/connection/docker.py index ba2249299..68247dae2 100644 --- a/ansible_collections/community/docker/plugins/connection/docker.py +++ b/ansible_collections/community/docker/plugins/connection/docker.py @@ -20,8 +20,7 @@ description: - Run commands or put/fetch files to an existing docker container. - Uses the Docker CLI to execute commands in the container. If you prefer to directly connect to the Docker daemon, use the - R(community.docker.docker_api,ansible_collections.community.docker.docker_api_connection) - connection plugin. + P(community.docker.docker_api#connection) connection plugin. options: remote_addr: description: diff --git a/ansible_collections/community/docker/plugins/connection/docker_api.py b/ansible_collections/community/docker/plugins/connection/docker_api.py index 24c95f55a..3b99281c3 100644 --- a/ansible_collections/community/docker/plugins/connection/docker_api.py +++ b/ansible_collections/community/docker/plugins/connection/docker_api.py @@ -15,8 +15,7 @@ description: - Run commands or put/fetch files to an existing docker container. 
- Uses the L(requests library,https://pypi.org/project/requests/) to interact directly with the Docker daemon instead of using the Docker CLI. Use the - R(community.docker.docker,ansible_collections.community.docker.docker_connection) - connection plugin if you want to use the Docker CLI. + P(community.docker.docker#connection) connection plugin if you want to use the Docker CLI. notes: - Does B(not work with TCP TLS sockets)! This is caused by the inability to send C(close_notify) without closing the connection with Python's C(SSLSocket)s. See U(https://github.com/ansible-collections/community.docker/issues/605) for more information. diff --git a/ansible_collections/community/docker/plugins/connection/nsenter.py b/ansible_collections/community/docker/plugins/connection/nsenter.py index fff36afbb..f429f8cef 100644 --- a/ansible_collections/community/docker/plugins/connection/nsenter.py +++ b/ansible_collections/community/docker/plugins/connection/nsenter.py @@ -128,7 +128,7 @@ class Connection(ConnectionBase): # This plugin does not support pipelining. This diverges from the behavior of # the core "local" connection plugin that this one derives from. if sudoable and self.become and self.become.expect_prompt(): - # Create a pty if sudoable for privlege escalation that needs it. + # Create a pty if sudoable for privilege escalation that needs it. # Falls back to using a standard pipe if this fails, which may # cause the command to fail in certain situations where we are escalating # privileges or the command otherwise needs a pty. 
diff --git a/ansible_collections/community/docker/plugins/doc_fragments/compose_v2.py b/ansible_collections/community/docker/plugins/doc_fragments/compose_v2.py new file mode 100644 index 000000000..4e21f974c --- /dev/null +++ b/ansible_collections/community/docker/plugins/doc_fragments/compose_v2.py @@ -0,0 +1,63 @@ +# -*- coding: utf-8 -*- + +# Copyright (c) 2023, Felix Fontein <felix@fontein.de> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Docker doc fragment + DOCUMENTATION = r''' +options: + project_src: + description: + - Path to a directory containing a Compose file + (C(compose.yml), C(compose.yaml), C(docker-compose.yml), or C(docker-compose.yaml)). + - If O(files) is provided, will look for these files in this directory instead. + type: path + required: true + project_name: + description: + - Provide a project name. If not provided, the project name is taken from the basename of O(project_src). + type: str + files: + description: + - List of Compose file names relative to O(project_src) to be used instead of the main Compose file + (C(compose.yml), C(compose.yaml), C(docker-compose.yml), or C(docker-compose.yaml)). + - Files are loaded and merged in the order given. + type: list + elements: path + version_added: 3.7.0 + env_files: + description: + - By default environment files are loaded from a C(.env) file located directly under the O(project_src) directory. + - O(env_files) can be used to specify the path of one or multiple custom environment files instead. + - The path is relative to the O(project_src) directory. + type: list + elements: path + profiles: + description: + - List of profiles to enable when starting services. + - Equivalent to C(docker compose --profile). 
+ type: list + elements: str +notes: + - |- + The Docker compose CLI plugin has no stable output format (see for example U(https://github.com/docker/compose/issues/10872)), + and for the main operations also no machine friendly output format. The module tries to accomodate this with various + version-dependent behavior adjustments and with testing older and newer versions of the Docker compose CLI plugin. + + Currently the module is tested with multiple plugin versions between 2.18.1 and 2.23.3. The exact list of plugin versions + will change over time. New releases of the Docker compose CLI plugin can break this module at any time. +''' + + # The following needs to be kept in sync with the compose_v2 module utils + MINIMUM_VERSION = r''' +options: {} +requirements: + - "Docker CLI with Docker compose plugin 2.18.0 or later" +''' diff --git a/ansible_collections/community/docker/plugins/doc_fragments/docker.py b/ansible_collections/community/docker/plugins/doc_fragments/docker.py index 4c537850e..92989a97b 100644 --- a/ansible_collections/community/docker/plugins/doc_fragments/docker.py +++ b/ansible_collections/community/docker/plugins/doc_fragments/docker.py @@ -16,25 +16,27 @@ options: docker_host: description: - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the - TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, + TCP connection string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, the module will automatically replace C(tcp) in the connection URL with C(https). - - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used + - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used instead. If the environment variable is not set, the default value will be used. 
type: str - default: unix://var/run/docker.sock + default: unix:///var/run/docker.sock aliases: [ docker_url ] tls_hostname: description: - When verifying the authenticity of the Docker Host server, provide the expected name of the server. - - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will + - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will be used instead. If the environment variable is not set, the default value will be used. - - Note that this option had a default value C(localhost) in older versions. It was removed in community.docker 3.0.0. + - Note that this option had a default value V(localhost) in older versions. It was removed in community.docker 3.0.0. + - B(Note:) this option is no longer supported for Docker SDK for Python 7.0.0+. Specifying it with Docker SDK for + Python 7.0.0 or newer will lead to an error. type: str api_version: description: - The version of the Docker API running on the Docker Host. - Defaults to the latest version of the API supported by Docker SDK for Python and the docker daemon. - - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be + - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be used instead. If the environment variable is not set, the default value will be used. type: str default: auto @@ -42,42 +44,46 @@ options: timeout: description: - The maximum amount of time in seconds to wait on a response from the API. - - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used + - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used instead. If the environment variable is not set, the default value will be used. 
type: int default: 60 - ca_cert: + ca_path: description: - Use a CA certificate when performing server verification by providing the path to a CA certificate file. - - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set, - the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used. + - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, + the file C(ca.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used. + - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has + been added as an alias and can still be used. type: path - aliases: [ tls_ca_cert, cacert_path ] + aliases: [ ca_cert, tls_ca_cert, cacert_path ] client_cert: description: - Path to the client's TLS certificate file. - - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set, - the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used. + - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, + the file C(cert.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used. type: path aliases: [ tls_client_cert, cert_path ] client_key: description: - Path to the client's TLS key file. - - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set, - the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used. + - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, + the file C(key.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used. 
type: path aliases: [ tls_client_key, key_path ] ssl_version: description: - - Provide a valid SSL version number. Default value determined by ssl.py module. - - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be + - Provide a valid SSL version number. Default value determined by L(SSL Python module, https://docs.python.org/3/library/ssl.html). + - If the value is not specified in the task, the value of environment variable E(DOCKER_SSL_VERSION) will be used instead. + - B(Note:) this option is no longer supported for Docker SDK for Python 7.0.0+. Specifying it with Docker SDK for + Python 7.0.0 or newer will lead to an error. type: str tls: description: - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host - server. Note that if I(validate_certs) is set to C(true) as well, it will take precedence. - - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used + server. Note that if O(validate_certs) is set to V(true) as well, it will take precedence. + - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used instead. If the environment variable is not set, the default value will be used. type: bool default: false @@ -91,7 +97,7 @@ options: validate_certs: description: - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. - - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be + - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be used instead. If the environment variable is not set, the default value will be used. type: bool default: false @@ -104,14 +110,14 @@ options: notes: - Connect to the Docker daemon by providing parameters with each task or by defining environment variables. 
- You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION), - C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped + You can define E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH), E(DOCKER_SSL_VERSION), + E(DOCKER_TLS), E(DOCKER_TLS_VERIFY) and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped with the product that sets up the environment. It will set these variables for you. See U(https://docs.docker.com/machine/reference/env/) for more details. - When connecting to Docker daemon with TLS, you might need to install additional Python packages. For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip). - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions. - In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified, + In general, it will use C($HOME/.docker/config.json) if the E(DOCKER_CONFIG) environment variable is not specified, and use C($DOCKER_CONFIG/config.json) otherwise. ''' @@ -131,9 +137,11 @@ options: timeout: vars: - name: ansible_docker_timeout - ca_cert: + ca_path: vars: - name: ansible_docker_ca_cert + - name: ansible_docker_ca_path + version_added: 3.6.0 client_cert: vars: - name: ansible_docker_client_cert @@ -189,25 +197,25 @@ options: docker_host: description: - The URL or Unix socket path used to connect to the Docker API. To connect to a remote host, provide the - TCP connection string. For example, C(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, + TCP connection string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, the module will automatically replace C(tcp) in the connection URL with C(https). 
- - If the value is not specified in the task, the value of environment variable C(DOCKER_HOST) will be used + - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used instead. If the environment variable is not set, the default value will be used. type: str - default: unix://var/run/docker.sock + default: unix:///var/run/docker.sock aliases: [ docker_url ] tls_hostname: description: - When verifying the authenticity of the Docker Host server, provide the expected name of the server. - - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_HOSTNAME) will + - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will be used instead. If the environment variable is not set, the default value will be used. - - Note that this option had a default value C(localhost) in older versions. It was removed in community.docker 3.0.0. + - Note that this option had a default value V(localhost) in older versions. It was removed in community.docker 3.0.0. type: str api_version: description: - The version of the Docker API running on the Docker Host. - Defaults to the latest version of the API supported by this collection and the docker daemon. - - If the value is not specified in the task, the value of environment variable C(DOCKER_API_VERSION) will be + - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be used instead. If the environment variable is not set, the default value will be used. type: str default: auto @@ -215,42 +223,44 @@ options: timeout: description: - The maximum amount of time in seconds to wait on a response from the API. - - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) will be used + - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT) will be used instead. 
If the environment variable is not set, the default value will be used. type: int default: 60 - ca_cert: + ca_path: description: - Use a CA certificate when performing server verification by providing the path to a CA certificate file. - - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set, - the file C(ca.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used. + - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, + the file C(ca.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used. + - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has + been added as an alias and can still be used. type: path - aliases: [ tls_ca_cert, cacert_path ] + aliases: [ ca_cert, tls_ca_cert, cacert_path ] client_cert: description: - Path to the client's TLS certificate file. - - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set, - the file C(cert.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used. + - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, + the file C(cert.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used. type: path aliases: [ tls_client_cert, cert_path ] client_key: description: - Path to the client's TLS key file. - - If the value is not specified in the task and the environment variable C(DOCKER_CERT_PATH) is set, - the file C(key.pem) from the directory specified in the environment variable C(DOCKER_CERT_PATH) will be used. + - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, + the file C(key.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used. 
type: path aliases: [ tls_client_key, key_path ] ssl_version: description: - - Provide a valid SSL version number. Default value determined by ssl.py module. - - If the value is not specified in the task, the value of environment variable C(DOCKER_SSL_VERSION) will be + - Provide a valid SSL version number. Default value determined by L(SSL Python module, https://docs.python.org/3/library/ssl.html). + - If the value is not specified in the task, the value of environment variable E(DOCKER_SSL_VERSION) will be used instead. type: str tls: description: - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host - server. Note that if I(validate_certs) is set to C(true) as well, it will take precedence. - - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS) will be used + server. Note that if O(validate_certs) is set to V(true) as well, it will take precedence. + - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used instead. If the environment variable is not set, the default value will be used. type: bool default: false @@ -263,7 +273,7 @@ options: validate_certs: description: - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. - - If the value is not specified in the task, the value of environment variable C(DOCKER_TLS_VERIFY) will be + - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be used instead. If the environment variable is not set, the default value will be used. type: bool default: false @@ -276,14 +286,12 @@ options: notes: - Connect to the Docker daemon by providing parameters with each task or by defining environment variables. - You can define C(DOCKER_HOST), C(DOCKER_TLS_HOSTNAME), C(DOCKER_API_VERSION), C(DOCKER_CERT_PATH), C(DOCKER_SSL_VERSION), - C(DOCKER_TLS), C(DOCKER_TLS_VERIFY) and C(DOCKER_TIMEOUT). 
If you are using docker machine, run the script shipped + You can define E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH), E(DOCKER_SSL_VERSION), + E(DOCKER_TLS), E(DOCKER_TLS_VERIFY) and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped with the product that sets up the environment. It will set these variables for you. See U(https://docs.docker.com/machine/reference/env/) for more details. -# - When connecting to Docker daemon with TLS, you might need to install additional Python packages. -# For the Docker SDK for Python, version 2.4 or newer, this can be done by installing C(docker[tls]) with M(ansible.builtin.pip). # - Note that the Docker SDK for Python only allows to specify the path to the Docker configuration for very few functions. -# In general, it will use C($HOME/.docker/config.json) if the C(DOCKER_CONFIG) environment variable is not specified, +# In general, it will use C($HOME/.docker/config.json) if the E(DOCKER_CONFIG) environment variable is not specified, # and use C($DOCKER_CONFIG/config.json) otherwise. - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to communicate with the Docker daemon. It uses code derived from the Docker SDK or Python that is included in this @@ -291,7 +299,96 @@ notes: requirements: - requests - pywin32 (when using named pipes on Windows 32) - - paramiko (when using SSH with I(use_ssh_client=false)) + - paramiko (when using SSH with O(use_ssh_client=false)) - pyOpenSSL (when using TLS) - backports.ssl_match_hostname (when using TLS on Python 2) ''' + + # Docker doc fragment when using the Docker CLI + CLI_DOCUMENTATION = r''' +options: + docker_cli: + description: + - Path to the Docker CLI. If not provided, will search for Docker CLI on the E(PATH). + type: path + docker_host: + description: + - The URL or Unix socket path used to connect to the Docker API. 
To connect to a remote host, provide the + TCP connection string. For example, V(tcp://192.0.2.23:2376). If TLS is used to encrypt the connection, + the module will automatically replace C(tcp) in the connection URL with C(https). + - If the value is not specified in the task, the value of environment variable E(DOCKER_HOST) will be used + instead. If the environment variable is not set, the default value will be used. + type: str + default: unix:///var/run/docker.sock + aliases: [ docker_url ] + tls_hostname: + description: + - When verifying the authenticity of the Docker Host server, provide the expected name of the server. + - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_HOSTNAME) will + be used instead. If the environment variable is not set, the default value will be used. + type: str + api_version: + description: + - The version of the Docker API running on the Docker Host. + - Defaults to the latest version of the API supported by this collection and the docker daemon. + - If the value is not specified in the task, the value of environment variable E(DOCKER_API_VERSION) will be + used instead. If the environment variable is not set, the default value will be used. + type: str + default: auto + aliases: [ docker_api_version ] + ca_path: + description: + - Use a CA certificate when performing server verification by providing the path to a CA certificate file. + - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, + the file C(ca.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used. + type: path + aliases: [ ca_cert, tls_ca_cert, cacert_path ] + client_cert: + description: + - Path to the client's TLS certificate file. 
+ - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, + the file C(cert.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used. + type: path + aliases: [ tls_client_cert, cert_path ] + client_key: + description: + - Path to the client's TLS key file. + - If the value is not specified in the task and the environment variable E(DOCKER_CERT_PATH) is set, + the file C(key.pem) from the directory specified in the environment variable E(DOCKER_CERT_PATH) will be used. + type: path + aliases: [ tls_client_key, key_path ] + tls: + description: + - Secure the connection to the API by using TLS without verifying the authenticity of the Docker host + server. Note that if O(validate_certs) is set to V(true) as well, it will take precedence. + - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS) will be used + instead. If the environment variable is not set, the default value will be used. + type: bool + default: false + validate_certs: + description: + - Secure the connection to the API by using TLS and verifying the authenticity of the Docker host server. + - If the value is not specified in the task, the value of environment variable E(DOCKER_TLS_VERIFY) will be + used instead. If the environment variable is not set, the default value will be used. + type: bool + default: false + aliases: [ tls_verify ] + # debug: + # description: + # - Debug mode + # type: bool + # default: false + cli_context: + description: + - The Docker CLI context to use. + type: str + +notes: + - Connect to the Docker daemon by providing parameters with each task or by defining environment variables. + You can define E(DOCKER_HOST), E(DOCKER_TLS_HOSTNAME), E(DOCKER_API_VERSION), E(DOCKER_CERT_PATH), + E(DOCKER_TLS), E(DOCKER_TLS_VERIFY) and E(DOCKER_TIMEOUT). If you are using docker machine, run the script shipped + with the product that sets up the environment. 
It will set these variables for you. See + U(https://docs.docker.com/machine/reference/env/) for more details. + - This module does B(not) use the L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) to + communicate with the Docker daemon. It directly calls the Docker CLI program. +''' diff --git a/ansible_collections/community/docker/plugins/inventory/docker_containers.py b/ansible_collections/community/docker/plugins/inventory/docker_containers.py index a82cda955..75b49ff92 100644 --- a/ansible_collections/community/docker/plugins/inventory/docker_containers.py +++ b/ansible_collections/community/docker/plugins/inventory/docker_containers.py @@ -21,13 +21,14 @@ author: extends_documentation_fragment: - ansible.builtin.constructed - community.docker.docker.api_documentation + - community.library_inventory_filtering_v1.inventory_filter description: - Reads inventories from the Docker API. - Uses a YAML configuration file that ends with C(docker.[yml|yaml]). options: plugin: description: - - The name of this plugin, it should always be set to C(community.docker.docker_containers) + - The name of this plugin, it should always be set to V(community.docker.docker_containers) for this plugin to recognize it as it's own. type: str required: true @@ -36,17 +37,14 @@ options: connection_type: description: - Which connection type to use the containers. - - One way to connect to containers is to use SSH (C(ssh)). For this, the options I(default_ip) and - I(private_ssh_port) are used. This requires that a SSH daemon is running inside the containers. - - Alternatively, C(docker-cli) selects the - R(docker connection plugin,ansible_collections.community.docker.docker_connection), - and C(docker-api) (default) selects the - R(docker_api connection plugin,ansible_collections.community.docker.docker_api_connection). - - When C(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin - to the connection plugin. 
This can be controlled with I(configure_docker_daemon). - - Note that the R(docker_api connection plugin,ansible_collections.community.docker.docker_api_connection) - does B(not work with TCP TLS sockets)! See U(https://github.com/ansible-collections/community.docker/issues/605) - for more information. + - One way to connect to containers is to use SSH (V(ssh)). For this, the options O(default_ip) and + O(private_ssh_port) are used. This requires that a SSH daemon is running inside the containers. + - Alternatively, V(docker-cli) selects the P(community.docker.docker#connection) connection plugin, + and V(docker-api) (default) selects the P(community.docker.docker_api#connection) connection plugin. + - When V(docker-api) is used, all Docker daemon configuration values are passed from the inventory plugin + to the connection plugin. This can be controlled with O(configure_docker_daemon). + - Note that the P(community.docker.docker_api#connection) does B(not work with TCP TLS sockets)! + See U(https://github.com/ansible-collections/community.docker/issues/605) for more information. type: str default: docker-api choices: @@ -57,7 +55,7 @@ options: configure_docker_daemon: description: - Whether to pass all Docker daemon configuration from the inventory plugin to the connection plugin. - - Only used when I(connection_type=docker-api). + - Only used when O(connection_type=docker-api). type: bool default: true version_added: 1.8.0 @@ -67,8 +65,8 @@ options: - Toggle to (not) include all available inspection metadata. - Note that all top-level keys will be transformed to the format C(docker_xxx). For example, C(HostConfig) is converted to C(docker_hostconfig). - - If this is C(false), these values can only be used during I(constructed), I(groups), and I(keyed_groups). - - The C(docker) inventory script always added these variables, so for compatibility set this to C(true). 
+ - If this is V(false), these values can only be used during O(compose), O(groups), and O(keyed_groups). + - The C(docker) inventory script always added these variables, so for compatibility set this to V(true). type: bool default: false @@ -76,14 +74,14 @@ options: description: - The IP address to assign to ansible_host when the container's SSH port is mapped to interface '0.0.0.0'. - - Only used if I(connection_type) is C(ssh). + - Only used if O(connection_type) is V(ssh). type: str default: 127.0.0.1 private_ssh_port: description: - The port containers use for SSH. - - Only used if I(connection_type) is C(ssh). + - Only used if O(connection_type) is V(ssh). type: int default: 22 @@ -96,20 +94,23 @@ options: - "C(image_<image name>): contains the containers that have the image C(<image name>)." - "C(stack_<stack name>): contains the containers that belong to the stack C(<stack name>)." - "C(service_<service name>): contains the containers that belong to the service C(<service name>)" - - "C(<docker_host>): contains the containers which belong to the Docker daemon I(docker_host). + - "C(<docker_host>): contains the containers which belong to the Docker daemon O(docker_host). Useful if you run this plugin against multiple Docker daemons." - "C(running): contains all containers that are running." - "C(stopped): contains all containers that are not running." - - If this is not set to C(true), you should use keyed groups to add the containers to groups. + - If this is not set to V(true), you should use keyed groups to add the containers to groups. See the examples for how to do that. 
type: bool default: false + + filters: + version_added: 3.5.0 ''' EXAMPLES = ''' # Minimal example using local Docker daemon plugin: community.docker.docker_containers -docker_host: unix://var/run/docker.sock +docker_host: unix:///var/run/docker.sock # Minimal example using remote Docker daemon plugin: community.docker.docker_containers @@ -124,7 +125,7 @@ tls: true plugin: community.docker.docker_containers docker_host: tcp://my-docker-host:2376 validate_certs: true -ca_cert: /somewhere/ca.pem +ca_path: /somewhere/ca.pem client_key: /somewhere/key.pem client_cert: /somewhere/cert.pem @@ -147,6 +148,18 @@ connection_type: ssh compose: ansible_ssh_host: ansible_ssh_host | default(docker_name[1:], true) ansible_ssh_port: ansible_ssh_port | default(22, true) + +# Only consider containers which have a label 'foo', or whose name starts with 'a' +plugin: community.docker.docker_containers +filters: + # Accept all containers which have a label called 'foo' + - include: >- + "foo" in docker_config.Labels + # Next accept all containers whose inventory_hostname starts with 'a' + - include: >- + inventory_hostname.startswith("a") + # Exclude all containers that didn't match any of the above filters + - exclude: true ''' import re @@ -154,6 +167,7 @@ import re from ansible.errors import AnsibleError from ansible.module_utils.common.text.converters import to_native from ansible.plugins.inventory import BaseInventoryPlugin, Constructable +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe from ansible_collections.community.docker.plugins.module_utils.common_api import ( RequestException, @@ -166,6 +180,7 @@ from ansible_collections.community.docker.plugins.plugin_utils.common_api import ) from ansible_collections.community.docker.plugins.module_utils._api.errors import APIError, DockerException +from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import parse_filters, filter_host MIN_DOCKER_API = None @@ -212,6 
+227,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): if value is not None: extra_facts[var_name] = value + filters = parse_filters(self.get_option('filters')) for container in containers: id = container.get('Id') short_id = id[:13] @@ -223,10 +239,9 @@ class InventoryModule(BaseInventoryPlugin, Constructable): name = short_id full_name = id - self.inventory.add_host(name) facts = dict( - docker_name=name, - docker_short_id=short_id + docker_name=make_unsafe(name), + docker_short_id=make_unsafe(short_id), ) full_facts = dict() @@ -241,26 +256,26 @@ class InventoryModule(BaseInventoryPlugin, Constructable): running = state.get('Running') + groups = [] + # Add container to groups image_name = config.get('Image') if image_name and add_legacy_groups: - self.inventory.add_group('image_{0}'.format(image_name)) - self.inventory.add_host(name, group='image_{0}'.format(image_name)) + groups.append('image_{0}'.format(image_name)) stack_name = labels.get('com.docker.stack.namespace') if stack_name: full_facts['docker_stack'] = stack_name if add_legacy_groups: - self.inventory.add_group('stack_{0}'.format(stack_name)) - self.inventory.add_host(name, group='stack_{0}'.format(stack_name)) + groups.append('stack_{0}'.format(stack_name)) service_name = labels.get('com.docker.swarm.service.name') if service_name: full_facts['docker_service'] = service_name if add_legacy_groups: - self.inventory.add_group('service_{0}'.format(service_name)) - self.inventory.add_host(name, group='service_{0}'.format(service_name)) + groups.append('service_{0}'.format(service_name)) + ansible_connection = None if connection_type == 'ssh': # Figure out ssh IP and Port try: @@ -283,23 +298,38 @@ class InventoryModule(BaseInventoryPlugin, Constructable): elif connection_type == 'docker-cli': facts.update(dict( ansible_host=full_name, - ansible_connection='community.docker.docker', )) + ansible_connection = 'community.docker.docker' elif connection_type == 'docker-api': facts.update(dict( 
ansible_host=full_name, - ansible_connection='community.docker.docker_api', )) facts.update(extra_facts) + ansible_connection = 'community.docker.docker_api' full_facts.update(facts) for key, value in inspect.items(): fact_key = self._slugify(key) full_facts[fact_key] = value + full_facts = make_unsafe(full_facts) + + if ansible_connection: + for d in (facts, full_facts): + if 'ansible_connection' not in d: + d['ansible_connection'] = ansible_connection + + if not filter_host(self, name, full_facts, filters): + continue + if verbose_output: facts.update(full_facts) + self.inventory.add_host(name) + for group in groups: + self.inventory.add_group(group) + self.inventory.add_host(name, group=group) + for key, value in facts.items(): self.inventory.set_variable(name, key, value) diff --git a/ansible_collections/community/docker/plugins/inventory/docker_machine.py b/ansible_collections/community/docker/plugins/inventory/docker_machine.py index 69d946100..e3330a339 100644 --- a/ansible_collections/community/docker/plugins/inventory/docker_machine.py +++ b/ansible_collections/community/docker/plugins/inventory/docker_machine.py @@ -13,12 +13,13 @@ DOCUMENTATION = ''' requirements: - L(Docker Machine,https://docs.docker.com/machine/) extends_documentation_fragment: - - constructed + - ansible.builtin.constructed + - community.library_inventory_filtering_v1.inventory_filter description: - Get inventory hosts from Docker Machine. - Uses a YAML configuration file that ends with docker_machine.(yml|yaml). - The plugin sets standard host variables C(ansible_host), C(ansible_port), C(ansible_user) and C(ansible_ssh_private_key). - - The plugin stores the Docker Machine 'env' output variables in I(dm_) prefixed host variables. + - The plugin stores the Docker Machine 'env' output variables in C(dm_) prefixed host variables. 
options: plugin: @@ -28,12 +29,12 @@ DOCUMENTATION = ''' daemon_env: description: - Whether docker daemon connection environment variables should be fetched, and how to behave if they cannot be fetched. - - With C(require) and C(require-silently), fetch them and skip any host for which they cannot be fetched. - A warning will be issued for any skipped host if the choice is C(require). - - With C(optional) and C(optional-silently), fetch them and not skip hosts for which they cannot be fetched. - A warning will be issued for hosts where they cannot be fetched if the choice is C(optional). - - With C(skip), do not attempt to fetch the docker daemon connection environment variables. - - If fetched successfully, the variables will be prefixed with I(dm_) and stored as host variables. + - With V(require) and V(require-silently), fetch them and skip any host for which they cannot be fetched. + A warning will be issued for any skipped host if the choice is V(require). + - With V(optional) and V(optional-silently), fetch them and not skip hosts for which they cannot be fetched. + A warning will be issued for hosts where they cannot be fetched if the choice is V(optional). + - With V(skip), do not attempt to fetch the docker daemon connection environment variables. + - If fetched successfully, the variables will be prefixed with C(dm_) and stored as host variables. type: str choices: - require @@ -44,15 +45,17 @@ DOCUMENTATION = ''' default: require running_required: description: - - When C(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped. + - When V(true), hosts which Docker Machine indicates are in a state other than C(running) will be skipped. 
type: bool default: true verbose_output: description: - - When C(true), include all available nodes metadata (for exmaple C(Image), C(Region), C(Size)) as a JSON object + - When V(true), include all available nodes metadata (for example C(Image), C(Region), C(Size)) as a JSON object named C(docker_machine_node_attributes). type: bool default: true + filters: + version_added: 3.5.0 ''' EXAMPLES = ''' @@ -93,6 +96,9 @@ from ansible.module_utils.common.text.converters import to_text from ansible.module_utils.common.process import get_bin_path from ansible.plugins.inventory import BaseInventoryPlugin, Constructable, Cacheable from ansible.utils.display import Display +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe + +from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import parse_filters, filter_host import json import re @@ -173,7 +179,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _inspect_docker_machine_host(self, node): try: - inspect_lines = self._run_command(['inspect', self.node]) + inspect_lines = self._run_command(['inspect', node]) except subprocess.CalledProcessError: return None @@ -181,7 +187,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _ip_addr_docker_machine_host(self, node): try: - ip_addr = self._run_command(['ip', self.node]) + ip_addr = self._run_command(['ip', node]) except subprocess.CalledProcessError: return None @@ -201,13 +207,18 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): def _populate(self): daemon_env = self.get_option('daemon_env') + filters = parse_filters(self.get_option('filters')) try: - for self.node in self._get_machine_names(): - self.node_attrs = self._inspect_docker_machine_host(self.node) - if not self.node_attrs: + for node in self._get_machine_names(): + node_attrs = self._inspect_docker_machine_host(node) + if not node_attrs: continue - machine_name = 
self.node_attrs['Driver']['MachineName'] + unsafe_node_attrs = make_unsafe(node_attrs) + + machine_name = unsafe_node_attrs['Driver']['MachineName'] + if not filter_host(self, machine_name, unsafe_node_attrs, filters): + continue # query `docker-machine env` to obtain remote Docker daemon connection settings in the form of commands # that could be used to set environment variables to influence a local Docker client: @@ -224,40 +235,40 @@ class InventoryModule(BaseInventoryPlugin, Constructable, Cacheable): # check for valid ip address from inspect output, else explicitly use ip command to find host ip address # this works around an issue seen with Google Compute Platform where the IP address was not available # via the 'inspect' subcommand but was via the 'ip' subcomannd. - if self.node_attrs['Driver']['IPAddress']: - ip_addr = self.node_attrs['Driver']['IPAddress'] + if unsafe_node_attrs['Driver']['IPAddress']: + ip_addr = unsafe_node_attrs['Driver']['IPAddress'] else: - ip_addr = self._ip_addr_docker_machine_host(self.node) + ip_addr = self._ip_addr_docker_machine_host(node) # set standard Ansible remote host connection settings to details captured from `docker-machine` # see: https://docs.ansible.com/ansible/latest/user_guide/intro_inventory.html - self.inventory.set_variable(machine_name, 'ansible_host', ip_addr) - self.inventory.set_variable(machine_name, 'ansible_port', self.node_attrs['Driver']['SSHPort']) - self.inventory.set_variable(machine_name, 'ansible_user', self.node_attrs['Driver']['SSHUser']) - self.inventory.set_variable(machine_name, 'ansible_ssh_private_key_file', self.node_attrs['Driver']['SSHKeyPath']) + self.inventory.set_variable(machine_name, 'ansible_host', make_unsafe(ip_addr)) + self.inventory.set_variable(machine_name, 'ansible_port', unsafe_node_attrs['Driver']['SSHPort']) + self.inventory.set_variable(machine_name, 'ansible_user', unsafe_node_attrs['Driver']['SSHUser']) + self.inventory.set_variable(machine_name, 
'ansible_ssh_private_key_file', unsafe_node_attrs['Driver']['SSHKeyPath']) # set variables based on Docker Machine tags - tags = self.node_attrs['Driver'].get('Tags') or '' - self.inventory.set_variable(machine_name, 'dm_tags', tags) + tags = unsafe_node_attrs['Driver'].get('Tags') or '' + self.inventory.set_variable(machine_name, 'dm_tags', make_unsafe(tags)) # set variables based on Docker Machine env variables for kv in env_var_tuples: - self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), kv[1]) + self.inventory.set_variable(machine_name, 'dm_{0}'.format(kv[0]), make_unsafe(kv[1])) if self.get_option('verbose_output'): - self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', self.node_attrs) + self.inventory.set_variable(machine_name, 'docker_machine_node_attributes', unsafe_node_attrs) # Use constructed if applicable strict = self.get_option('strict') # Composed variables - self._set_composite_vars(self.get_option('compose'), self.node_attrs, machine_name, strict=strict) + self._set_composite_vars(self.get_option('compose'), unsafe_node_attrs, machine_name, strict=strict) # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group - self._add_host_to_composed_groups(self.get_option('groups'), self.node_attrs, machine_name, strict=strict) + self._add_host_to_composed_groups(self.get_option('groups'), unsafe_node_attrs, machine_name, strict=strict) # Create groups based on variable values and add the corresponding hosts to it - self._add_host_to_keyed_groups(self.get_option('keyed_groups'), self.node_attrs, machine_name, strict=strict) + self._add_host_to_keyed_groups(self.get_option('keyed_groups'), unsafe_node_attrs, machine_name, strict=strict) except Exception as e: raise AnsibleError('Unable to fetch hosts from Docker Machine, this was the original exception: %s' % diff --git a/ansible_collections/community/docker/plugins/inventory/docker_swarm.py 
b/ansible_collections/community/docker/plugins/inventory/docker_swarm.py index ebb1da15c..0d60033f9 100644 --- a/ansible_collections/community/docker/plugins/inventory/docker_swarm.py +++ b/ansible_collections/community/docker/plugins/inventory/docker_swarm.py @@ -17,16 +17,17 @@ DOCUMENTATION = ''' - python >= 2.7 - L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.10.0 extends_documentation_fragment: - - constructed + - ansible.builtin.constructed + - community.library_inventory_filtering_v1.inventory_filter description: - Reads inventories from the Docker swarm API. - Uses a YAML configuration file docker_swarm.[yml|yaml]. - - "The plugin returns following groups of swarm nodes: I(all) - all hosts; I(workers) - all worker nodes; - I(managers) - all manager nodes; I(leader) - the swarm leader node; - I(nonleaders) - all nodes except the swarm leader." + - "The plugin returns following groups of swarm nodes: C(all) - all hosts; C(workers) - all worker nodes; + C(managers) - all manager nodes; C(leader) - the swarm leader node; + C(nonleaders) - all nodes except the swarm leader." options: plugin: - description: The name of this plugin, it should always be set to C(community.docker.docker_swarm) + description: The name of this plugin, it should always be set to V(community.docker.docker_swarm) for this plugin to recognize it as it's own. type: str required: true @@ -34,13 +35,13 @@ DOCUMENTATION = ''' docker_host: description: - Socket of a Docker swarm manager node (C(tcp), C(unix)). - - "Use C(unix://var/run/docker.sock) to connect via local socket." + - "Use V(unix:///var/run/docker.sock) to connect via local socket." type: str required: true aliases: [ docker_url ] verbose_output: description: Toggle to (not) include all available nodes metadata (for example C(Platform), C(Architecture), C(OS), - C(EngineVersion)) + C(EngineVersion)). 
type: bool default: true tls: @@ -57,11 +58,13 @@ DOCUMENTATION = ''' description: Path to the client's TLS key file. type: path aliases: [ tls_client_key, key_path ] - ca_cert: - description: Use a CA certificate when performing server verification by providing the path to a CA - certificate file. + ca_path: + description: + - Use a CA certificate when performing server verification by providing the path to a CA certificate file. + - This option was called O(ca_cert) and got renamed to O(ca_path) in community.docker 3.6.0. The old name has + been added as an alias and can still be used. type: path - aliases: [ tls_ca_cert, cacert_path ] + aliases: [ ca_cert, tls_ca_cert, cacert_path ] client_cert: description: Path to the client's TLS certificate file. type: path @@ -71,7 +74,9 @@ DOCUMENTATION = ''' the server. type: str ssl_version: - description: Provide a valid SSL version number. Default value determined by ssl.py module. + description: + - Provide a valid SSL version number. Default value determined + by L(SSL Python module, https://docs.python.org/3/library/ssl.html). type: str api_version: description: @@ -82,7 +87,7 @@ DOCUMENTATION = ''' timeout: description: - The maximum amount of time in seconds to wait on a response from the API. - - If the value is not specified in the task, the value of environment variable C(DOCKER_TIMEOUT) + - If the value is not specified in the task, the value of environment variable E(DOCKER_TIMEOUT). will be used instead. If the environment variable is not set, the default value will be used. type: int default: 60 @@ -96,20 +101,22 @@ DOCUMENTATION = ''' version_added: 1.5.0 include_host_uri: description: Toggle to return the additional attribute C(ansible_host_uri) which contains the URI of the - swarm leader in format of C(tcp://172.16.0.1:2376). This value may be used without additional - modification as value of option I(docker_host) in Docker Swarm modules when connecting via API. - The port always defaults to C(2376). 
+ swarm leader in format of V(tcp://172.16.0.1:2376). This value may be used without additional + modification as value of option O(docker_host) in Docker Swarm modules when connecting via API. + The port always defaults to V(2376). type: bool default: false include_host_uri_port: - description: Override the detected port number included in I(ansible_host_uri) + description: Override the detected port number included in C(ansible_host_uri). type: int + filters: + version_added: 3.5.0 ''' EXAMPLES = ''' # Minimal example using local docker plugin: community.docker.docker_swarm -docker_host: unix://var/run/docker.sock +docker_host: unix:///var/run/docker.sock # Minimal example using remote docker plugin: community.docker.docker_swarm @@ -124,7 +131,7 @@ tls: true plugin: community.docker.docker_swarm docker_host: tcp://my-docker-host:2376 validate_certs: true -ca_cert: /somewhere/ca.pem +ca_path: /somewhere/ca.pem client_key: /somewhere/key.pem client_cert: /somewhere/cert.pem @@ -152,6 +159,9 @@ from ansible_collections.community.docker.plugins.module_utils.common import get from ansible_collections.community.docker.plugins.module_utils.util import update_tls_hostname from ansible.plugins.inventory import BaseInventoryPlugin, Constructable from ansible.parsing.utils.addresses import parse_address +from ansible.utils.unsafe_proxy import wrap_var as make_unsafe + +from ansible_collections.community.library_inventory_filtering_v1.plugins.plugin_utils.inventory_filter import parse_filters, filter_host try: import docker @@ -174,7 +184,7 @@ class InventoryModule(BaseInventoryPlugin, Constructable): tls=self.get_option('tls'), tls_verify=self.get_option('validate_certs'), key_path=self.get_option('client_key'), - cacert_path=self.get_option('ca_cert'), + cacert_path=self.get_option('ca_path'), cert_path=self.get_option('client_cert'), tls_hostname=self.get_option('tls_hostname'), api_version=self.get_option('api_version'), @@ -192,6 +202,8 @@ class 
InventoryModule(BaseInventoryPlugin, Constructable): self.inventory.add_group('leader') self.inventory.add_group('nonleaders') + filters = parse_filters(self.get_option('filters')) + if self.get_option('include_host_uri'): if self.get_option('include_host_uri_port'): host_uri_port = str(self.get_option('include_host_uri_port')) @@ -202,48 +214,51 @@ class InventoryModule(BaseInventoryPlugin, Constructable): try: self.nodes = self.client.nodes.list() - for self.node in self.nodes: - self.node_attrs = self.client.nodes.get(self.node.id).attrs - self.inventory.add_host(self.node_attrs['ID']) - self.inventory.add_host(self.node_attrs['ID'], group=self.node_attrs['Spec']['Role']) - self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', - self.node_attrs['Status']['Addr']) + for node in self.nodes: + node_attrs = self.client.nodes.get(node.id).attrs + unsafe_node_attrs = make_unsafe(node_attrs) + if not filter_host(self, unsafe_node_attrs['ID'], unsafe_node_attrs, filters): + continue + self.inventory.add_host(unsafe_node_attrs['ID']) + self.inventory.add_host(unsafe_node_attrs['ID'], group=unsafe_node_attrs['Spec']['Role']) + self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host', + unsafe_node_attrs['Status']['Addr']) if self.get_option('include_host_uri'): - self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri', - 'tcp://' + self.node_attrs['Status']['Addr'] + ':' + host_uri_port) + self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host_uri', + make_unsafe('tcp://' + unsafe_node_attrs['Status']['Addr'] + ':' + host_uri_port)) if self.get_option('verbose_output'): - self.inventory.set_variable(self.node_attrs['ID'], 'docker_swarm_node_attributes', self.node_attrs) - if 'ManagerStatus' in self.node_attrs: - if self.node_attrs['ManagerStatus'].get('Leader'): + self.inventory.set_variable(unsafe_node_attrs['ID'], 'docker_swarm_node_attributes', unsafe_node_attrs) + if 'ManagerStatus' in unsafe_node_attrs: + if 
unsafe_node_attrs['ManagerStatus'].get('Leader'): # This is workaround of bug in Docker when in some cases the Leader IP is 0.0.0.0 # Check moby/moby#35437 for details - swarm_leader_ip = parse_address(self.node_attrs['ManagerStatus']['Addr'])[0] or \ - self.node_attrs['Status']['Addr'] + swarm_leader_ip = parse_address(node_attrs['ManagerStatus']['Addr'])[0] or \ + unsafe_node_attrs['Status']['Addr'] if self.get_option('include_host_uri'): - self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host_uri', - 'tcp://' + swarm_leader_ip + ':' + host_uri_port) - self.inventory.set_variable(self.node_attrs['ID'], 'ansible_host', swarm_leader_ip) - self.inventory.add_host(self.node_attrs['ID'], group='leader') + self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host_uri', + make_unsafe('tcp://' + swarm_leader_ip + ':' + host_uri_port)) + self.inventory.set_variable(unsafe_node_attrs['ID'], 'ansible_host', make_unsafe(swarm_leader_ip)) + self.inventory.add_host(unsafe_node_attrs['ID'], group='leader') else: - self.inventory.add_host(self.node_attrs['ID'], group='nonleaders') + self.inventory.add_host(unsafe_node_attrs['ID'], group='nonleaders') else: - self.inventory.add_host(self.node_attrs['ID'], group='nonleaders') + self.inventory.add_host(unsafe_node_attrs['ID'], group='nonleaders') # Use constructed if applicable strict = self.get_option('strict') # Composed variables self._set_composite_vars(self.get_option('compose'), - self.node_attrs, - self.node_attrs['ID'], + unsafe_node_attrs, + unsafe_node_attrs['ID'], strict=strict) # Complex groups based on jinja2 conditionals, hosts that meet the conditional are added to group self._add_host_to_composed_groups(self.get_option('groups'), - self.node_attrs, - self.node_attrs['ID'], + unsafe_node_attrs, + unsafe_node_attrs['ID'], strict=strict) # Create groups based on variable values and add the corresponding hosts to it self._add_host_to_keyed_groups(self.get_option('keyed_groups'), - self.node_attrs, - 
self.node_attrs['ID'], + unsafe_node_attrs, + unsafe_node_attrs['ID'], strict=strict) except Exception as e: raise AnsibleError('Unable to fetch hosts from Docker swarm API, this was the original exception: %s' % diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/api/client.py b/ansible_collections/community/docker/plugins/module_utils/_api/api/client.py index d9ec5870d..44f17924f 100644 --- a/ansible_collections/community/docker/plugins/module_utils/_api/api/client.py +++ b/ansible_collections/community/docker/plugins/module_utils/_api/api/client.py @@ -455,7 +455,7 @@ class APIClient( return self._get_result_tty(stream, res, self._check_is_tty(container)) def _get_result_tty(self, stream, res, is_tty): - # We should also use raw streaming (without keep-alives) + # We should also use raw streaming (without keep-alive) # if we're dealing with a tty-enabled container. if is_tty: return self._stream_raw_result(res) if stream else \ diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/errors.py b/ansible_collections/community/docker/plugins/module_utils/_api/errors.py index 90dd5aada..47c284d39 100644 --- a/ansible_collections/community/docker/plugins/module_utils/_api/errors.py +++ b/ansible_collections/community/docker/plugins/module_utils/_api/errors.py @@ -12,6 +12,7 @@ __metaclass__ = type from ._import_helper import HTTPError as _HTTPError +from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.six import raise_from @@ -32,7 +33,7 @@ def create_api_error_from_http_exception(e): try: explanation = response.json()['message'] except ValueError: - explanation = (response.content or '').strip() + explanation = to_native((response.content or '').strip()) cls = APIError if response.status_code == 404: if explanation and ('No such image' in str(explanation) or diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/tls.py 
b/ansible_collections/community/docker/plugins/module_utils/_api/tls.py index ed5416d82..b1e284a5d 100644 --- a/ansible_collections/community/docker/plugins/module_utils/_api/tls.py +++ b/ansible_collections/community/docker/plugins/module_utils/_api/tls.py @@ -39,8 +39,7 @@ class TLSConfig(object): ssl_version = None def __init__(self, client_cert=None, ca_cert=None, verify=None, - ssl_version=None, assert_hostname=None, - assert_fingerprint=None): + ssl_version=None, assert_hostname=None): # Argument compatibility/mapping with # https://docs.docker.com/engine/articles/https/ # This diverges from the Docker CLI in that users can specify 'tls' @@ -48,7 +47,6 @@ class TLSConfig(object): # leaving verify=False self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint # If the user provides an SSL version, we should use their preference if ssl_version: @@ -118,5 +116,4 @@ class TLSConfig(object): client.mount('https://', SSLHTTPAdapter( ssl_version=self.ssl_version, assert_hostname=self.assert_hostname, - assert_fingerprint=self.assert_fingerprint, )) diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py b/ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py index e1b5ce020..ed9250d6a 100644 --- a/ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py +++ b/ansible_collections/community/docker/plugins/module_utils/_api/transport/ssladapter.py @@ -27,15 +27,11 @@ PoolManager = urllib3.poolmanager.PoolManager class SSLHTTPAdapter(BaseHTTPAdapter): '''An HTTPS Transport Adapter that uses an arbitrary SSL version.''' - __attrs__ = HTTPAdapter.__attrs__ + ['assert_fingerprint', - 'assert_hostname', - 'ssl_version'] + __attrs__ = HTTPAdapter.__attrs__ + ['assert_hostname', 'ssl_version'] - def __init__(self, ssl_version=None, assert_hostname=None, - assert_fingerprint=None, **kwargs): + def __init__(self, ssl_version=None, 
assert_hostname=None, **kwargs): self.ssl_version = ssl_version self.assert_hostname = assert_hostname - self.assert_fingerprint = assert_fingerprint super(SSLHTTPAdapter, self).__init__(**kwargs) def init_poolmanager(self, connections, maxsize, block=False): @@ -43,9 +39,9 @@ class SSLHTTPAdapter(BaseHTTPAdapter): 'num_pools': connections, 'maxsize': maxsize, 'block': block, - 'assert_hostname': self.assert_hostname, - 'assert_fingerprint': self.assert_fingerprint, } + if self.assert_hostname is not None: + kwargs['assert_hostname'] = self.assert_hostname if self.ssl_version and self.can_override_ssl_version(): kwargs['ssl_version'] = self.ssl_version @@ -60,7 +56,7 @@ class SSLHTTPAdapter(BaseHTTPAdapter): But we still need to take care of when there is a proxy poolmanager """ conn = super(SSLHTTPAdapter, self).get_connection(*args, **kwargs) - if conn.assert_hostname != self.assert_hostname: + if self.assert_hostname is not None and conn.assert_hostname != self.assert_hostname: conn.assert_hostname = self.assert_hostname return conn diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py b/ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py index 9193ce30e..792aa0cb5 100644 --- a/ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py +++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/socket.py @@ -15,7 +15,6 @@ import os import select import socket as pysocket import struct -import sys from ansible.module_utils.six import PY3, binary_type @@ -43,7 +42,7 @@ def read(socket, n=4096): recoverable_errors = (errno.EINTR, errno.EDEADLK, errno.EWOULDBLOCK) if PY3 and not isinstance(socket, NpipeSocket): - if sys.platform == 'win32': + if not hasattr(select, "poll"): # Limited to 1024 select.select([socket], [], []) else: diff --git a/ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py 
b/ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py index 910b0dc35..db3718d4d 100644 --- a/ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py +++ b/ansible_collections/community/docker/plugins/module_utils/_api/utils/utils.py @@ -160,6 +160,22 @@ def convert_volume_binds(binds): else: mode = 'rw' + # NOTE: this is only relevant for Linux hosts + # (doesn't apply in Docker Desktop) + propagation_modes = [ + 'rshared', + 'shared', + 'rslave', + 'slave', + 'rprivate', + 'private', + ] + if 'propagation' in v and v['propagation'] in propagation_modes: + if mode: + mode = ','.join([mode, v['propagation']]) + else: + mode = v['propagation'] + result.append( text_type('{0}:{1}:{2}').format(k, bind, mode) ) diff --git a/ansible_collections/community/docker/plugins/module_utils/_logfmt.py b/ansible_collections/community/docker/plugins/module_utils/_logfmt.py new file mode 100644 index 000000000..fa45b5754 --- /dev/null +++ b/ansible_collections/community/docker/plugins/module_utils/_logfmt.py @@ -0,0 +1,208 @@ +# Copyright (c) 2024, Felix Fontein <felix@fontein.de> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +""" +Parse go logfmt messages. + +See https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc for information on the format. 
+"""
+
+from __future__ import (absolute_import, division, print_function)
+__metaclass__ = type
+
+
+# The format is defined in https://pkg.go.dev/github.com/kr/logfmt?utm_source=godoc
+# (look for "EBNFish")
+
+
+class InvalidLogFmt(Exception):
+    pass
+
+
+class _Mode(object):
+    GARBAGE = 0
+    KEY = 1
+    EQUAL = 2
+    IDENT_VALUE = 3
+    QUOTED_VALUE = 4
+
+
+_ESCAPE_DICT = {
+    '"': '"',
+    '\\': '\\',
+    "'": "'",
+    '/': '/',
+    'b': '\b',
+    'f': '\f',
+    'n': '\n',
+    'r': '\r',
+    't': '\t',
+}
+
+_HEX_DICT = {
+    '0': 0,
+    '1': 1,
+    '2': 2,
+    '3': 3,
+    '4': 4,
+    '5': 5,
+    '6': 6,
+    '7': 7,
+    '8': 8,
+    '9': 9,
+    'a': 0xA,
+    'b': 0xB,
+    'c': 0xC,
+    'd': 0xD,
+    'e': 0xE,
+    'f': 0xF,
+    'A': 0xA,
+    'B': 0xB,
+    'C': 0xC,
+    'D': 0xD,
+    'E': 0xE,
+    'F': 0xF,
+}
+
+
+def _is_ident(cur):
+    return cur > ' ' and cur not in ('"', '=')
+
+
+class _Parser(object):
+    def __init__(self, line):
+        self.line = line
+        self.index = 0
+        self.length = len(line)
+
+    def done(self):
+        return self.index >= self.length
+
+    def cur(self):
+        return self.line[self.index]
+
+    def next(self):
+        self.index += 1
+
+    def prev(self):
+        self.index -= 1
+
+    def parse_unicode_sequence(self):
+        if self.index + 6 > self.length:
+            raise InvalidLogFmt('Not enough space for unicode escape')
+        if self.line[self.index:self.index + 2] != '\\u':
+            raise InvalidLogFmt('Invalid unicode escape start')
+        v = 0
+        for i in range(self.index + 2, self.index + 6):
+            v <<= 4
+            try:
+                # Index with the loop variable i, not self.index: self.index still
+                # points at the backslash of the escape, so using it here would make
+                # every \uXXXX escape fail with "Invalid unicode escape digit '\\'".
+                v += _HEX_DICT[self.line[i]]
+            except KeyError:
+                raise InvalidLogFmt('Invalid unicode escape digit {digit!r}'.format(digit=self.line[i]))
+        self.index += 6
+        return chr(v)
+
+
+def parse_line(line, logrus_mode=False):
+    result = {}
+    parser = _Parser(line)
+    key = []
+    value = []
+    mode = _Mode.GARBAGE
+
+    def handle_kv(has_no_value=False):
+        k = ''.join(key)
+        v = None if has_no_value else ''.join(value)
+        result[k] = v
+        del key[:]
+        del value[:]
+
+    def parse_garbage(cur):
+        if _is_ident(cur):
+            return _Mode.KEY
+        parser.next()
+ return _Mode.GARBAGE + + def parse_key(cur): + if _is_ident(cur): + key.append(cur) + parser.next() + return _Mode.KEY + elif cur == '=': + parser.next() + return _Mode.EQUAL + else: + if logrus_mode: + raise InvalidLogFmt('Key must always be followed by "=" in logrus mode') + handle_kv(has_no_value=True) + parser.next() + return _Mode.GARBAGE + + def parse_equal(cur): + if _is_ident(cur): + value.append(cur) + parser.next() + return _Mode.IDENT_VALUE + elif cur == '"': + parser.next() + return _Mode.QUOTED_VALUE + else: + handle_kv() + parser.next() + return _Mode.GARBAGE + + def parse_ident_value(cur): + if _is_ident(cur): + value.append(cur) + parser.next() + return _Mode.IDENT_VALUE + else: + handle_kv() + parser.next() + return _Mode.GARBAGE + + def parse_quoted_value(cur): + if cur == '\\': + parser.next() + if parser.done(): + raise InvalidLogFmt('Unterminated escape sequence in quoted string') + cur = parser.cur() + if cur in _ESCAPE_DICT: + value.append(_ESCAPE_DICT[cur]) + elif cur != 'u': + raise InvalidLogFmt('Unknown escape sequence {seq!r}'.format(seq='\\' + cur)) + else: + parser.prev() + value.append(parser.parse_unicode_sequence()) + parser.next() + return _Mode.QUOTED_VALUE + elif cur == '"': + handle_kv() + parser.next() + return _Mode.GARBAGE + elif cur < ' ': + raise InvalidLogFmt('Control characters in quoted string are not allowed') + else: + value.append(cur) + parser.next() + return _Mode.QUOTED_VALUE + + parsers = { + _Mode.GARBAGE: parse_garbage, + _Mode.KEY: parse_key, + _Mode.EQUAL: parse_equal, + _Mode.IDENT_VALUE: parse_ident_value, + _Mode.QUOTED_VALUE: parse_quoted_value, + } + while not parser.done(): + mode = parsers[mode](parser.cur()) + if mode == _Mode.KEY and logrus_mode: + raise InvalidLogFmt('Key must always be followed by "=" in logrus mode') + if mode == _Mode.KEY or mode == _Mode.EQUAL: + handle_kv(has_no_value=True) + elif mode == _Mode.IDENT_VALUE: + handle_kv() + elif mode == _Mode.QUOTED_VALUE: + raise 
InvalidLogFmt('Unterminated quoted string') + return result diff --git a/ansible_collections/community/docker/plugins/module_utils/_platform.py b/ansible_collections/community/docker/plugins/module_utils/_platform.py new file mode 100644 index 000000000..4b6216f74 --- /dev/null +++ b/ansible_collections/community/docker/plugins/module_utils/_platform.py @@ -0,0 +1,179 @@ +# This code is part of the Ansible collection community.docker, but is an independent component. +# This particular file, and this file only, is based on containerd's platforms Go module +# (https://github.com/containerd/containerd/tree/main/platforms) +# +# Copyright (c) 2023 Felix Fontein <felix@fontein.de> +# Copyright The containerd Authors +# +# It is licensed under the Apache 2.0 license (see LICENSES/Apache-2.0.txt in this collection) +# SPDX-License-Identifier: Apache-2.0 + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import re + + +_VALID_STR = re.compile('^[A-Za-z0-9_-]+$') + + +def _validate_part(string, part, part_name): + if not part: + raise ValueError('Invalid platform string "{string}": {part} is empty'.format(string=string, part=part_name)) + if not _VALID_STR.match(part): + raise ValueError('Invalid platform string "{string}": {part} has invalid characters'.format(string=string, part=part_name)) + return part + + +# See https://github.com/containerd/containerd/blob/main/platforms/database.go#L32-L38 +_KNOWN_OS = ( + "aix", "android", "darwin", "dragonfly", "freebsd", "hurd", "illumos", "ios", "js", + "linux", "nacl", "netbsd", "openbsd", "plan9", "solaris", "windows", "zos", +) + +# See https://github.com/containerd/containerd/blob/main/platforms/database.go#L54-L60 +_KNOWN_ARCH = ( + "386", "amd64", "amd64p32", "arm", "armbe", "arm64", "arm64be", "ppc64", "ppc64le", + "loong64", "mips", "mipsle", "mips64", "mips64le", "mips64p32", "mips64p32le", + "ppc", "riscv", "riscv64", "s390", "s390x", "sparc", "sparc64", "wasm", +) + + +def 
_normalize_os(os_str): + # See normalizeOS() in https://github.com/containerd/containerd/blob/main/platforms/database.go + os_str = os_str.lower() + if os_str == 'macos': + os_str = 'darwin' + return os_str + + +_NORMALIZE_ARCH = { + ("i386", None): ("386", ""), + ("x86_64", "v1"): ("amd64", ""), + ("x86-64", "v1"): ("amd64", ""), + ("amd64", "v1"): ("amd64", ""), + ("x86_64", None): ("amd64", None), + ("x86-64", None): ("amd64", None), + ("amd64", None): ("amd64", None), + ("aarch64", "8"): ("arm64", ""), + ("arm64", "8"): ("arm64", ""), + ("aarch64", "v8"): ("arm64", ""), + ("arm64", "v8"): ("arm64", ""), + ("aarch64", None): ("arm64", None), + ("arm64", None): ("arm64", None), + ("armhf", None): ("arm", "v7"), + ("armel", None): ("arm", "v6"), + ("arm", ""): ("arm", "v7"), + ("arm", "5"): ("arm", "v5"), + ("arm", "6"): ("arm", "v6"), + ("arm", "7"): ("arm", "v7"), + ("arm", "8"): ("arm", "v8"), + ("arm", None): ("arm", None), +} + + +def _normalize_arch(arch_str, variant_str): + # See normalizeArch() in https://github.com/containerd/containerd/blob/main/platforms/database.go + arch_str = arch_str.lower() + variant_str = variant_str.lower() + res = _NORMALIZE_ARCH.get((arch_str, variant_str)) + if res is None: + res = _NORMALIZE_ARCH.get((arch_str, None)) + if res is None: + return arch_str, variant_str + if res is not None: + arch_str = res[0] + if res[1] is not None: + variant_str = res[1] + return arch_str, variant_str + + +class _Platform(object): + def __init__(self, os=None, arch=None, variant=None): + self.os = os + self.arch = arch + self.variant = variant + if variant is not None: + if arch is None: + raise ValueError('If variant is given, architecture must be given too') + if os is None: + raise ValueError('If variant is given, os must be given too') + + @classmethod + def parse_platform_string(cls, string, daemon_os=None, daemon_arch=None): + # See Parse() in https://github.com/containerd/containerd/blob/main/platforms/platforms.go + if string is None: 
+ return cls() + if not string: + raise ValueError('Platform string must be non-empty') + parts = string.split('/', 2) + arch = None + variant = None + if len(parts) == 1: + _validate_part(string, string, 'OS/architecture') + # The part is either OS or architecture + os = _normalize_os(string) + if os in _KNOWN_OS: + if daemon_arch is not None: + arch, variant = _normalize_arch(daemon_arch, '') + return cls(os=os, arch=arch, variant=variant) + arch, variant = _normalize_arch(os, '') + if arch in _KNOWN_ARCH: + return cls( + os=_normalize_os(daemon_os) if daemon_os else None, + arch=arch or None, + variant=variant or None, + ) + raise ValueError('Invalid platform string "{0}": unknown OS or architecture'.format(string)) + os = _validate_part(string, parts[0], 'OS') + if not os: + raise ValueError('Invalid platform string "{0}": OS is empty'.format(string)) + arch = _validate_part(string, parts[1], 'architecture') if len(parts) > 1 else None + if arch is not None and not arch: + raise ValueError('Invalid platform string "{0}": architecture is empty'.format(string)) + variant = _validate_part(string, parts[2], 'variant') if len(parts) > 2 else None + if variant is not None and not variant: + raise ValueError('Invalid platform string "{0}": variant is empty'.format(string)) + arch, variant = _normalize_arch(arch, variant or '') + if len(parts) == 2 and arch == 'arm' and variant == 'v7': + variant = None + if len(parts) == 3 and arch == 'arm64' and variant == '': + variant = 'v8' + return cls(os=_normalize_os(os), arch=arch, variant=variant or None) + + def __str__(self): + if self.variant: + parts = [self.os, self.arch, self.variant] + elif self.os: + if self.arch: + parts = [self.os, self.arch] + else: + parts = [self.os] + elif self.arch is not None: + parts = [self.arch] + else: + parts = [] + return '/'.join(parts) + + def __repr__(self): + return '_Platform(os={os!r}, arch={arch!r}, variant={variant!r})'.format(os=self.os, arch=self.arch, variant=self.variant) + + 
def __eq__(self, other): + return self.os == other.os and self.arch == other.arch and self.variant == other.variant + + +def normalize_platform_string(string, daemon_os=None, daemon_arch=None): + return str(_Platform.parse_platform_string(string, daemon_os=daemon_os, daemon_arch=daemon_arch)) + + +def compose_platform_string(os=None, arch=None, variant=None, daemon_os=None, daemon_arch=None): + if os is None and daemon_os is not None: + os = _normalize_os(daemon_os) + if arch is None and daemon_arch is not None: + arch, variant = _normalize_arch(daemon_arch, variant or '') + variant = variant or None + return str(_Platform(os=os, arch=arch, variant=variant or None)) + + +def compare_platform_strings(string1, string2): + return _Platform.parse_platform_string(string1) == _Platform.parse_platform_string(string2) diff --git a/ansible_collections/community/docker/plugins/module_utils/common.py b/ansible_collections/community/docker/plugins/module_utils/common.py index e6a06ed65..d1dcf3e60 100644 --- a/ansible_collections/community/docker/plugins/module_utils/common.py +++ b/ansible_collections/community/docker/plugins/module_utils/common.py @@ -122,6 +122,32 @@ if not HAS_DOCKER_PY: def _get_tls_config(fail_function, **kwargs): + if 'ssl_version' in kwargs and LooseVersion(docker_version) >= LooseVersion('7.0.0b1'): + ssl_version = kwargs.pop('ssl_version') + if ssl_version is not None: + fail_function( + "ssl_version is not compatible with Docker SDK for Python 7.0.0+. You are using" + " Docker SDK for Python {docker_py_version}. The ssl_version option (value: {ssl_version})" + " has either been set directly or with the environment variable DOCKER_SSL_VERSION." 
+ " Make sure it is not set, or switch to an older version of Docker SDK for Python.".format( + docker_py_version=docker_version, + ssl_version=ssl_version, + ) + ) + if 'assert_hostname' in kwargs and LooseVersion(docker_version) >= LooseVersion('7.0.0b1'): + assert_hostname = kwargs.pop('assert_hostname') + if assert_hostname is not None: + fail_function( + "tls_hostname is not compatible with Docker SDK for Python 7.0.0+. You are using" + " Docker SDK for Python {docker_py_version}. The tls_hostname option (value: {tls_hostname})" + " has either been set directly or with the environment variable DOCKER_TLS_HOSTNAME." + " Make sure it is not set, or switch to an older version of Docker SDK for Python.".format( + docker_py_version=docker_version, + tls_hostname=assert_hostname, + ) + ) + # Filter out all None parameters + kwargs = dict((k, v) for k, v in kwargs.items() if v is not None) try: tls_config = TLSConfig(**kwargs) return tls_config @@ -234,12 +260,8 @@ class AnsibleDockerClientBase(Client): def log(self, msg, pretty_print=False): pass # if self.debug: - # log_file = open('docker.log', 'a') - # if pretty_print: - # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': '))) - # log_file.write(u'\n') - # else: - # log_file.write(msg + u'\n') + # from .util import log_debug + # log_debug(msg, pretty_print=pretty_print) @abc.abstractmethod def fail(self, msg, **kwargs): @@ -309,7 +331,7 @@ class AnsibleDockerClientBase(Client): 'DOCKER_TLS_HOSTNAME', None, type='str'), api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION', 'auto', type='str'), - cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None, type='str'), + cacert_path=self._get_value('cacert_path', params['ca_path'], 'DOCKER_CERT_PATH', None, type='str'), cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None, type='str'), key_path=self._get_value('key_path', params['client_key'], 
'DOCKER_CERT_PATH', None, type='str'), ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None, type='str'), @@ -456,7 +478,7 @@ class AnsibleDockerClientBase(Client): images = self._image_lookup(lookup, tag) if len(images) > 1: - self.fail("Registry returned more than one result for %s:%s" % (name, tag)) + self.fail("Daemon returned more than one result for %s:%s" % (name, tag)) if len(images) == 1: try: diff --git a/ansible_collections/community/docker/plugins/module_utils/common_api.py b/ansible_collections/community/docker/plugins/module_utils/common_api.py index 7d46a153a..b5ea42fa3 100644 --- a/ansible_collections/community/docker/plugins/module_utils/common_api.py +++ b/ansible_collections/community/docker/plugins/module_utils/common_api.py @@ -131,12 +131,8 @@ class AnsibleDockerClientBase(Client): def log(self, msg, pretty_print=False): pass # if self.debug: - # log_file = open('docker.log', 'a') - # if pretty_print: - # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': '))) - # log_file.write(u'\n') - # else: - # log_file.write(msg + u'\n') + # from .util import log_debug + # log_debug(msg, pretty_print=pretty_print) @abc.abstractmethod def fail(self, msg, **kwargs): @@ -206,7 +202,7 @@ class AnsibleDockerClientBase(Client): 'DOCKER_TLS_HOSTNAME', None, type='str'), api_version=self._get_value('api_version', params['api_version'], 'DOCKER_API_VERSION', 'auto', type='str'), - cacert_path=self._get_value('cacert_path', params['ca_cert'], 'DOCKER_CERT_PATH', None, type='str'), + cacert_path=self._get_value('cacert_path', params['ca_path'], 'DOCKER_CERT_PATH', None, type='str'), cert_path=self._get_value('cert_path', params['client_cert'], 'DOCKER_CERT_PATH', None, type='str'), key_path=self._get_value('key_path', params['client_key'], 'DOCKER_CERT_PATH', None, type='str'), ssl_version=self._get_value('ssl_version', params['ssl_version'], 'DOCKER_SSL_VERSION', None, type='str'), @@ -396,7 +392,7 
@@ class AnsibleDockerClientBase(Client): images = self._image_lookup(lookup, tag) if len(images) > 1: - self.fail("Registry returned more than one result for %s:%s" % (name, tag)) + self.fail("Daemon returned more than one result for %s:%s" % (name, tag)) if len(images) == 1: try: diff --git a/ansible_collections/community/docker/plugins/module_utils/common_cli.py b/ansible_collections/community/docker/plugins/module_utils/common_cli.py new file mode 100644 index 000000000..60d539877 --- /dev/null +++ b/ansible_collections/community/docker/plugins/module_utils/common_cli.py @@ -0,0 +1,339 @@ +# Copyright (c) 2023, Felix Fontein <felix@fontein.de> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import abc +import json +import shlex + +from ansible.module_utils.basic import AnsibleModule, env_fallback +from ansible.module_utils.common.process import get_bin_path +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion + +from ansible_collections.community.docker.plugins.module_utils._api.auth import resolve_repository_name + +from ansible_collections.community.docker.plugins.module_utils.util import ( # noqa: F401, pylint: disable=unused-import + DEFAULT_DOCKER_HOST, + DEFAULT_TLS, + DEFAULT_TLS_VERIFY, + DOCKER_MUTUALLY_EXCLUSIVE, + DOCKER_REQUIRED_TOGETHER, + sanitize_result, +) + + +DOCKER_COMMON_ARGS = dict( + docker_cli=dict(type='path'), + docker_host=dict(type='str', default=DEFAULT_DOCKER_HOST, fallback=(env_fallback, ['DOCKER_HOST']), aliases=['docker_url']), + tls_hostname=dict(type='str', fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])), + api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), 
aliases=['docker_api_version']), + ca_path=dict(type='path', aliases=['ca_cert', 'tls_ca_cert', 'cacert_path']), + client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']), + client_key=dict(type='path', aliases=['tls_client_key', 'key_path']), + tls=dict(type='bool', default=DEFAULT_TLS, fallback=(env_fallback, ['DOCKER_TLS'])), + validate_certs=dict(type='bool', default=DEFAULT_TLS_VERIFY, fallback=(env_fallback, ['DOCKER_TLS_VERIFY']), aliases=['tls_verify']), + # debug=dict(type='bool', default=False), + cli_context=dict(type='str'), +) + + +class DockerException(Exception): + pass + + +class AnsibleDockerClientBase(object): + def __init__(self, common_args, min_docker_api_version=None): + self._environment = {} + if common_args['tls_hostname']: + self._environment['DOCKER_TLS_HOSTNAME'] = common_args['tls_hostname'] + if common_args['api_version'] and common_args['api_version'] != 'auto': + self._environment['DOCKER_API_VERSION'] = common_args['api_version'] + self._cli = common_args.get('docker_cli') + if self._cli is None: + try: + self._cli = get_bin_path('docker') + except ValueError: + self.fail('Cannot find docker CLI in path. Please provide it explicitly with the docker_cli parameter') + + self._cli_base = [self._cli] + self._cli_base.extend(['--host', common_args['docker_host']]) + if common_args['validate_certs']: + self._cli_base.append('--tlsverify') + elif common_args['tls']: + self._cli_base.append('--tls') + if common_args['ca_path']: + self._cli_base.extend(['--tlscacert', common_args['ca_path']]) + if common_args['client_cert']: + self._cli_base.extend(['--tlscert', common_args['client_cert']]) + if common_args['client_key']: + self._cli_base.extend(['--tlskey', common_args['client_key']]) + if common_args['cli_context']: + self._cli_base.extend(['--context', common_args['cli_context']]) + + # `--format json` was only added as a shorthand for `--format {{ json . 
}}` in Docker 23.0 + dummy, self._version, dummy = self.call_cli_json('version', '--format', '{{ json . }}', check_rc=True) + self._info = None + + self.docker_api_version_str = self._version['Server']['ApiVersion'] + self.docker_api_version = LooseVersion(self.docker_api_version_str) + min_docker_api_version = min_docker_api_version or '1.25' + if self.docker_api_version < LooseVersion(min_docker_api_version): + self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version)) + + def log(self, msg, pretty_print=False): + pass + # if self.debug: + # from .util import log_debug + # log_debug(msg, pretty_print=pretty_print) + + def get_cli(self): + return self._cli + + def get_version_info(self): + return self._version + + def _compose_cmd(self, args): + return self._cli_base + list(args) + + def _compose_cmd_str(self, args): + return ' '.join(shlex.quote(a) for a in self._compose_cmd(args)) + + @abc.abstractmethod + # def call_cli(self, *args, check_rc=False, data=None, cwd=None, environ_update=None): + def call_cli(self, *args, **kwargs): + # Python 2.7 doesn't like anything than '**kwargs' after '*args', so we have to do this manually... 
+ pass + + # def call_cli_json(self, *args, check_rc=False, data=None, cwd=None, environ_update=None, warn_on_stderr=False): + def call_cli_json(self, *args, **kwargs): + warn_on_stderr = kwargs.pop('warn_on_stderr', False) + rc, stdout, stderr = self.call_cli(*args, **kwargs) + if warn_on_stderr and stderr: + self.warn(to_native(stderr)) + try: + data = json.loads(stdout) + except Exception as exc: + self.fail('Error while parsing JSON output of {cmd}: {exc}\nJSON output: {stdout}'.format( + cmd=self._compose_cmd_str(args), + exc=to_native(exc), + stdout=to_native(stdout), + )) + return rc, data, stderr + + # def call_cli_json_stream(self, *args, check_rc=False, data=None, cwd=None, environ_update=None, warn_on_stderr=False): + def call_cli_json_stream(self, *args, **kwargs): + warn_on_stderr = kwargs.pop('warn_on_stderr', False) + rc, stdout, stderr = self.call_cli(*args, **kwargs) + if warn_on_stderr and stderr: + self.warn(to_native(stderr)) + result = [] + try: + for line in stdout.splitlines(): + line = line.strip() + if line.startswith(b'{'): + result.append(json.loads(line)) + except Exception as exc: + self.fail('Error while parsing JSON output of {cmd}: {exc}\nJSON output: {stdout}'.format( + cmd=self._compose_cmd_str(args), + exc=to_native(exc), + stdout=to_native(stdout), + )) + return rc, result, stderr + + @abc.abstractmethod + def fail(self, msg, **kwargs): + pass + + @abc.abstractmethod + def warn(self, msg): + pass + + @abc.abstractmethod + def deprecate(self, msg, version=None, date=None, collection_name=None): + pass + + def get_cli_info(self): + if self._info is None: + dummy, self._info, dummy = self.call_cli_json('info', '--format', '{{ json . 
}}', check_rc=True) + return self._info + + def get_client_plugin_info(self, component): + for plugin in self.get_cli_info()['ClientInfo'].get('Plugins') or []: + if plugin.get('Name') == component: + return plugin + return None + + def _image_lookup(self, name, tag): + ''' + Including a tag in the name parameter sent to the Docker SDK for Python images method + does not work consistently. Instead, get the result set for name and manually check + if the tag exists. + ''' + dummy, images, dummy = self.call_cli_json_stream( + 'image', 'ls', '--format', '{{ json . }}', '--no-trunc', '--filter', 'reference={0}'.format(name), + check_rc=True, + ) + if tag: + lookup = "%s:%s" % (name, tag) + lookup_digest = "%s@%s" % (name, tag) + response = images + images = [] + for image in response: + if image.get('Tag') == tag or image.get('Digest') == tag: + images = [image] + break + return images + + def find_image(self, name, tag): + ''' + Lookup an image (by name and tag) and return the inspection results. 
+ ''' + if not name: + return None + + self.log("Find image %s:%s" % (name, tag)) + images = self._image_lookup(name, tag) + if not images: + # In API <= 1.20 seeing 'docker.io/<name>' as the name of images pulled from docker hub + registry, repo_name = resolve_repository_name(name) + if registry == 'docker.io': + # If docker.io is explicitly there in name, the image + # isn't found in some cases (#41509) + self.log("Check for docker.io image: %s" % repo_name) + images = self._image_lookup(repo_name, tag) + if not images and repo_name.startswith('library/'): + # Sometimes library/xxx images are not found + lookup = repo_name[len('library/'):] + self.log("Check for docker.io image: %s" % lookup) + images = self._image_lookup(lookup, tag) + if not images: + # Last case for some Docker versions: if docker.io wasn't there, + # it can be that the image wasn't found either + # (https://github.com/ansible/ansible/pull/15586) + lookup = "%s/%s" % (registry, repo_name) + self.log("Check for docker.io image: %s" % lookup) + images = self._image_lookup(lookup, tag) + if not images and '/' not in repo_name: + # This seems to be happening with podman-docker + # (https://github.com/ansible-collections/community.docker/issues/291) + lookup = "%s/library/%s" % (registry, repo_name) + self.log("Check for docker.io image: %s" % lookup) + images = self._image_lookup(lookup, tag) + + if len(images) > 1: + self.fail("Daemon returned more than one result for %s:%s" % (name, tag)) + + if len(images) == 1: + rc, image, stderr = self.call_cli_json('image', 'inspect', images[0]['ID']) + if not image: + self.log("Image %s:%s not found." % (name, tag)) + return None + if rc != 0: + self.fail("Error inspecting image %s:%s - %s" % (name, tag, to_native(stderr))) + return image[0] + + self.log("Image %s:%s not found." % (name, tag)) + return None + + def find_image_by_id(self, image_id, accept_missing_image=False): + ''' + Lookup an image (by ID) and return the inspection results. 
+ ''' + if not image_id: + return None + + self.log("Find image %s (by ID)" % image_id) + rc, image, stderr = self.call_cli_json('image', 'inspect', image_id) + if not image: + if not accept_missing_image: + self.fail("Error inspecting image ID %s - %s" % (image_id, to_native(stderr))) + self.log("Image %s not found." % image_id) + return None + if rc != 0: + self.fail("Error inspecting image ID %s - %s" % (image_id, to_native(stderr))) + return image[0] + + +class AnsibleModuleDockerClient(AnsibleDockerClientBase): + def __init__(self, argument_spec=None, supports_check_mode=False, mutually_exclusive=None, + required_together=None, required_if=None, required_one_of=None, required_by=None, + min_docker_api_version=None, fail_results=None): + + # Modules can put information in here which will always be returned + # in case client.fail() is called. + self.fail_results = fail_results or {} + + merged_arg_spec = dict() + merged_arg_spec.update(DOCKER_COMMON_ARGS) + if argument_spec: + merged_arg_spec.update(argument_spec) + self.arg_spec = merged_arg_spec + + mutually_exclusive_params = [] + mutually_exclusive_params += DOCKER_MUTUALLY_EXCLUSIVE + if mutually_exclusive: + mutually_exclusive_params += mutually_exclusive + + required_together_params = [] + required_together_params += DOCKER_REQUIRED_TOGETHER + if required_together: + required_together_params += required_together + + self.module = AnsibleModule( + argument_spec=merged_arg_spec, + supports_check_mode=supports_check_mode, + mutually_exclusive=mutually_exclusive_params, + required_together=required_together_params, + required_if=required_if, + required_one_of=required_one_of, + required_by=required_by or {}, + ) + + self.debug = False # self.module.params['debug'] + self.check_mode = self.module.check_mode + self.diff = self.module._diff + + common_args = dict((k, self.module.params[k]) for k in DOCKER_COMMON_ARGS) + super(AnsibleModuleDockerClient, self).__init__(common_args, 
min_docker_api_version=min_docker_api_version) + + # def call_cli(self, *args, check_rc=False, data=None, cwd=None, environ_update=None): + def call_cli(self, *args, **kwargs): + # Python 2.7 doesn't like anything than '**kwargs' after '*args', so we have to do this manually... + check_rc = kwargs.pop('check_rc', False) + data = kwargs.pop('data', None) + cwd = kwargs.pop('cwd', None) + environ_update = kwargs.pop('environ_update', None) + if kwargs: + raise TypeError("call_cli() got an unexpected keyword argument '%s'" % list(kwargs)[0]) + + environment = self._environment.copy() + if environ_update: + environment.update(environ_update) + rc, stdout, stderr = self.module.run_command( + self._compose_cmd(args), + binary_data=True, + check_rc=check_rc, + cwd=cwd, + data=data, + encoding=None, + environ_update=environment, + expand_user_and_vars=False, + ignore_invalid_cwd=False, + ) + return rc, stdout, stderr + + def fail(self, msg, **kwargs): + self.fail_results.update(kwargs) + self.module.fail_json(msg=msg, **sanitize_result(self.fail_results)) + + def warn(self, msg): + self.module.warn(msg) + + def deprecate(self, msg, version=None, date=None, collection_name=None): + self.module.deprecate(msg, version=version, date=date, collection_name=collection_name) diff --git a/ansible_collections/community/docker/plugins/module_utils/compose_v2.py b/ansible_collections/community/docker/plugins/module_utils/compose_v2.py new file mode 100644 index 000000000..92f109269 --- /dev/null +++ b/ansible_collections/community/docker/plugins/module_utils/compose_v2.py @@ -0,0 +1,618 @@ +# Copyright (c) 2023, Felix Fontein <felix@fontein.de> +# Copyright (c) 2023, Léo El Amri (@lel-amri) +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +import os +import re +from collections 
import namedtuple + +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.six.moves import shlex_quote + +from ansible_collections.community.docker.plugins.module_utils.util import DockerBaseClass +from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion +from ansible_collections.community.docker.plugins.module_utils._logfmt import ( + InvalidLogFmt as _InvalidLogFmt, + parse_line as _parse_logfmt_line, +) + + +DOCKER_COMPOSE_FILES = ('compose.yaml', 'compose.yml', 'docker-compose.yaml', 'docker-compose.yml') + +DOCKER_STATUS_DONE = frozenset(( + 'Started', + 'Healthy', + 'Exited', + 'Restarted', + 'Running', + 'Created', + 'Stopped', + 'Killed', + 'Removed', + # An extra, specific to containers + 'Recreated', + # Extras for pull events + 'Pulled', +)) +DOCKER_STATUS_WORKING = frozenset(( + 'Creating', + 'Starting', + 'Restarting', + 'Stopping', + 'Killing', + 'Removing', + # An extra, specific to containers + 'Recreate', + # Extras for pull events + 'Pulling', + # Extras for build start events + 'Building', +)) +DOCKER_STATUS_PULL = frozenset(( + 'Pulled', + 'Pulling', +)) +DOCKER_STATUS_ERROR = frozenset(( + 'Error', +)) +DOCKER_STATUS_WARNING = frozenset(( + 'Warning', +)) +DOCKER_STATUS_WAITING = frozenset(( + 'Waiting', +)) +DOCKER_STATUS = frozenset(DOCKER_STATUS_DONE | DOCKER_STATUS_WORKING | DOCKER_STATUS_PULL | DOCKER_STATUS_ERROR | DOCKER_STATUS_WAITING) + +DOCKER_PULL_PROGRESS_DONE = frozenset(( + 'Already exists', + 'Download complete', + 'Pull complete', +)) +DOCKER_PULL_PROGRESS_WORKING = frozenset(( + 'Pulling fs layer', + 'Waiting', + 'Downloading', + 'Verifying Checksum', + 'Extracting', +)) + + +class ResourceType(object): + UNKNOWN = "unknown" + NETWORK = "network" + IMAGE = "image" + IMAGE_LAYER = "image-layer" + VOLUME = "volume" + CONTAINER = "container" + SERVICE = "service" + + @classmethod + def from_docker_compose_event(cls, resource_type): + # type: 
(Type[ResourceType], Text) -> Any + return { + "Network": cls.NETWORK, + "Image": cls.IMAGE, + "Volume": cls.VOLUME, + "Container": cls.CONTAINER, + }[resource_type] + + +Event = namedtuple( + 'Event', + ['resource_type', 'resource_id', 'status', 'msg'] +) + + +_DRY_RUN_MARKER = 'DRY-RUN MODE -' + +_RE_RESOURCE_EVENT = re.compile( + r'^' + r'\s*' + r'(?P<resource_type>Network|Image|Volume|Container)' + r'\s+' + r'(?P<resource_id>\S+)' + r'\s+' + r'(?P<status>\S(?:|.*\S))' + r'\s*' + r'$' +) + +_RE_PULL_EVENT = re.compile( + r'^' + r'\s*' + r'(?P<service>\S+)' + r'\s+' + r'(?P<status>%s)' + r'\s*' + r'$' + % '|'.join(re.escape(status) for status in DOCKER_STATUS_PULL) +) + +_RE_PULL_PROGRESS = re.compile( + r'^' + r'\s*' + r'(?P<layer>\S+)' + r'\s+' + r'(?P<status>%s)' + r'\s*' + r'(?:|\s\[[^]]+\]\s+\S+\s*|\s+[0-9.kKmMgGbB]+/[0-9.kKmMgGbB]+\s*)' + r'$' + % '|'.join(re.escape(status) for status in sorted(DOCKER_PULL_PROGRESS_DONE | DOCKER_PULL_PROGRESS_WORKING)) +) + +_RE_ERROR_EVENT = re.compile( + r'^' + r'\s*' + r'(?P<resource_id>\S+)' + r'\s+' + r'(?P<status>%s)' + r'\s*' + r'(?P<msg>\S.*\S)?' + r'$' + % '|'.join(re.escape(status) for status in DOCKER_STATUS_ERROR) +) + +_RE_WARNING_EVENT = re.compile( + r'^' + r'\s*' + r'(?P<resource_id>\S+)' + r'\s+' + r'(?P<status>%s)' + r'\s*' + r'(?P<msg>\S.*\S)?' 
+ r'$' + % '|'.join(re.escape(status) for status in DOCKER_STATUS_WARNING) +) + +_RE_CONTINUE_EVENT = re.compile( + r'^' + r'\s*' + r'(?P<resource_id>\S+)' + r'\s+' + r'-' + r'\s*' + r'(?P<msg>\S(?:|.*\S))' + r'$' +) + +_RE_SKIPPED_EVENT = re.compile( + r'^' + r'\s*' + r'(?P<resource_id>\S+)' + r'\s+' + r'Skipped -' + r'\s*' + r'(?P<msg>\S(?:|.*\S))' + r'$' +) + +_RE_BUILD_START_EVENT = re.compile( + r'^' + r'\s*' + r'build service' + r'\s+' + r'(?P<resource_id>\S+)' + r'$' +) + +_RE_BUILD_PROGRESS_EVENT = re.compile( + r'^' + r'\s*' + r'==>' + r'\s+' + r'(?P<msg>.*)' + r'$' +) + +# The following needs to be kept in sync with the MINIMUM_VERSION compose_v2 docs fragment +MINIMUM_COMPOSE_VERSION = '2.18.0' + + +def _extract_event(line, warn_function=None): + match = _RE_RESOURCE_EVENT.match(line) + if match is not None: + status = match.group('status') + msg = None + if status not in DOCKER_STATUS: + status, msg = msg, status + return Event( + ResourceType.from_docker_compose_event(match.group('resource_type')), + match.group('resource_id'), + status, + msg, + ), True + match = _RE_PULL_EVENT.match(line) + if match: + return Event( + ResourceType.SERVICE, + match.group('service'), + match.group('status'), + None, + ), True + match = _RE_ERROR_EVENT.match(line) + if match: + return Event( + ResourceType.UNKNOWN, + match.group('resource_id'), + match.group('status'), + match.group('msg') or None, + ), True + match = _RE_WARNING_EVENT.match(line) + if match: + if warn_function: + if match.group('msg'): + msg = '{rid}: {msg}' + else: + msg = 'Unspecified warning for {rid}' + warn_function(msg.format(rid=match.group('resource_id'), msg=match.group('msg'))) + return None, True + match = _RE_PULL_PROGRESS.match(line) + if match: + return Event( + ResourceType.IMAGE_LAYER, + match.group('layer'), + match.group('status'), + None, + ), True + match = _RE_SKIPPED_EVENT.match(line) + if match: + return Event( + ResourceType.UNKNOWN, + match.group('resource_id'), + 'Skipped', + 
match.group('msg'), + ), True + match = _RE_BUILD_START_EVENT.match(line) + if match: + return Event( + ResourceType.SERVICE, + match.group('resource_id'), + 'Building', + None, + ), True + return None, False + + +def _extract_logfmt_event(line, warn_function=None): + try: + result = _parse_logfmt_line(line, logrus_mode=True) + except _InvalidLogFmt: + return None, False + if 'time' not in result or 'level' not in result or 'msg' not in result: + return None, False + if result['level'] == 'warning': + if warn_function: + warn_function(result['msg']) + return None, True + # TODO: no idea what to do with this + return None, False + + +def _warn_missing_dry_run_prefix(line, warn_missing_dry_run_prefix, warn_function): + if warn_missing_dry_run_prefix and warn_function: + # This could be a bug, a change of docker compose's output format, ... + # Tell the user to report it to us :-) + warn_function( + 'Event line is missing dry-run mode marker: {0!r}. Please report this at ' + 'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md' + .format(line) + ) + + +def _warn_unparsable_line(line, warn_function): + # This could be a bug, a change of docker compose's output format, ... + # Tell the user to report it to us :-) + if warn_function: + warn_function( + 'Cannot parse event from line: {0!r}. 
Please report this at ' + 'https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md' + .format(line) + ) + + +def _find_last_event_for(events, resource_id): + for index, event in enumerate(reversed(events)): + if event.resource_id == resource_id: + return len(events) - 1 - index, event + return None + + +def _concat_event_msg(event, append_msg): + return Event( + event.resource_type, + event.resource_id, + event.status, + '\n'.join(msg for msg in [event.msg, append_msg] if msg is not None), + ) + + +def parse_events(stderr, dry_run=False, warn_function=None): + events = [] + error_event = None + stderr_lines = stderr.splitlines() + if stderr_lines and stderr_lines[-1] == b'': + del stderr_lines[-1] + for line in stderr_lines: + line = to_native(line.strip()) + if not line: + continue + warn_missing_dry_run_prefix = False + if dry_run: + if line.startswith(_DRY_RUN_MARKER): + line = line[len(_DRY_RUN_MARKER):].lstrip() + else: + warn_missing_dry_run_prefix = True + event, parsed = _extract_event(line, warn_function=warn_function) + if event is not None: + events.append(event) + if event.status in DOCKER_STATUS_ERROR: + error_event = event + else: + error_event = None + _warn_missing_dry_run_prefix(line, warn_missing_dry_run_prefix, warn_function) + continue + elif parsed: + continue + match = _RE_BUILD_PROGRESS_EVENT.match(line) + if match: + # Ignore this + continue + match = _RE_CONTINUE_EVENT.match(line) + if match: + # Continuing an existing event + index_event = _find_last_event_for(events, match.group('resource_id')) + if index_event is not None: + index, event = index_event + events[-1] = _concat_event_msg(event, match.group('msg')) + event, parsed = _extract_logfmt_event(line, warn_function=warn_function) + if event is not None: + events.append(event) + elif parsed: + continue + if error_event is not None: + # Unparsable line that apparently belongs to the previous error event + events[-1] = 
_concat_event_msg(error_event, line) + continue + if line.startswith('Error '): + # Error message that is independent of an error event + error_event = Event( + ResourceType.UNKNOWN, + '', + 'Error', + line, + ) + events.append(error_event) + continue + if len(stderr_lines) == 1: + # **Very likely** an error message that is independent of an error event + error_event = Event( + ResourceType.UNKNOWN, + '', + 'Error', + line, + ) + events.append(error_event) + continue + _warn_missing_dry_run_prefix(line, warn_missing_dry_run_prefix, warn_function) + _warn_unparsable_line(line, warn_function) + return events + + +def has_changes(events, ignore_service_pull_events=False): + for event in events: + if event.status in DOCKER_STATUS_WORKING: + if ignore_service_pull_events and event.status in DOCKER_STATUS_PULL: + continue + return True + if event.resource_type == ResourceType.IMAGE_LAYER and event.status in DOCKER_PULL_PROGRESS_WORKING: + return True + return False + + +def extract_actions(events): + actions = [] + pull_actions = set() + for event in events: + if event.resource_type == ResourceType.IMAGE_LAYER and event.status in DOCKER_PULL_PROGRESS_WORKING: + pull_id = (event.resource_id, event.status) + if pull_id not in pull_actions: + pull_actions.add(pull_id) + actions.append({ + 'what': event.resource_type, + 'id': event.resource_id, + 'status': event.status, + }) + if event.resource_type != ResourceType.IMAGE_LAYER and event.status in DOCKER_STATUS_WORKING: + actions.append({ + 'what': event.resource_type, + 'id': event.resource_id, + 'status': event.status, + }) + return actions + + +def emit_warnings(events, warn_function): + for event in events: + # If a message is present, assume it is a warning + if event.status is None and event.msg is not None: + warn_function('Docker compose: {resource_type} {resource_id}: {msg}'.format( + resource_type=event.resource_type, + resource_id=event.resource_id, + msg=event.msg, + )) + + +def is_failed(events, rc): + if rc: + 
return True + return False + + +def update_failed(result, events, args, stdout, stderr, rc, cli): + if not rc: + return False + errors = [] + for event in events: + if event.status in DOCKER_STATUS_ERROR: + msg = 'Error when processing {resource_type} {resource_id}: ' + if event.resource_type == 'unknown': + msg = 'Error when processing {resource_id}: ' + if event.resource_id == '': + msg = 'General error: ' + msg += '{status}' if event.msg is None else '{msg}' + errors.append(msg.format( + resource_type=event.resource_type, + resource_id=event.resource_id, + status=event.status, + msg=event.msg, + )) + if not errors: + errors.append('Return code {code} is non-zero'.format(code=rc)) + result['failed'] = True + result['msg'] = '\n'.join(errors) + result['cmd'] = ' '.join(shlex_quote(arg) for arg in [cli] + args) + result['stdout'] = to_native(stdout) + result['stderr'] = to_native(stderr) + result['rc'] = rc + return True + + +def common_compose_argspec(): + return dict( + project_src=dict(type='path', required=True), + project_name=dict(type='str'), + files=dict(type='list', elements='path'), + env_files=dict(type='list', elements='path'), + profiles=dict(type='list', elements='str'), + ) + + +def combine_binary_output(*outputs): + return b'\n'.join(out for out in outputs if out) + + +def combine_text_output(*outputs): + return '\n'.join(out for out in outputs if out) + + +class BaseComposeManager(DockerBaseClass): + def __init__(self, client, min_version=MINIMUM_COMPOSE_VERSION): + super(BaseComposeManager, self).__init__() + self.client = client + self.check_mode = self.client.check_mode + parameters = self.client.module.params + + self.project_src = parameters['project_src'] + self.project_name = parameters['project_name'] + self.files = parameters['files'] + self.env_files = parameters['env_files'] + self.profiles = parameters['profiles'] + + compose = self.client.get_client_plugin_info('compose') + if compose is None: + self.client.fail('Docker CLI {0} does 
not have the compose plugin installed'.format(self.client.get_cli())) + compose_version = compose['Version'].lstrip('v') + self.compose_version = LooseVersion(compose_version) + if self.compose_version < LooseVersion(min_version): + self.client.fail('Docker CLI {cli} has the compose plugin with version {version}; need version {min_version} or later'.format( + cli=self.client.get_cli(), + version=compose_version, + min_version=min_version, + )) + + if not os.path.isdir(self.project_src): + self.client.fail('"{0}" is not a directory'.format(self.project_src)) + + if self.files: + for file in self.files: + path = os.path.join(self.project_src, file) + if not os.path.exists(path): + self.client.fail('Cannot find Compose file "{0}" relative to project directory "{1}"'.format(file, self.project_src)) + elif all(not os.path.exists(os.path.join(self.project_src, f)) for f in DOCKER_COMPOSE_FILES): + filenames = ', '.join(DOCKER_COMPOSE_FILES[:-1]) + self.client.fail('"{0}" does not contain {1}, or {2}'.format(self.project_src, filenames, DOCKER_COMPOSE_FILES[-1])) + + def get_base_args(self): + args = ['compose', '--ansi', 'never'] + if self.compose_version >= LooseVersion('2.19.0'): + # https://github.com/docker/compose/pull/10690 + args.extend(['--progress', 'plain']) + args.extend(['--project-directory', self.project_src]) + if self.project_name: + args.extend(['--project-name', self.project_name]) + for file in self.files or []: + args.extend(['--file', file]) + for env_file in self.env_files or []: + args.extend(['--env-file', env_file]) + for profile in self.profiles or []: + args.extend(['--profile', profile]) + return args + + def list_containers_raw(self): + args = self.get_base_args() + ['ps', '--format', 'json', '--all'] + if self.compose_version >= LooseVersion('2.23.0'): + # https://github.com/docker/compose/pull/11038 + args.append('--no-trunc') + kwargs = dict(cwd=self.project_src, check_rc=True) + if self.compose_version >= LooseVersion('2.21.0'): + # 
Breaking change in 2.21.0: https://github.com/docker/compose/pull/10918 + dummy, containers, dummy = self.client.call_cli_json_stream(*args, **kwargs) + else: + dummy, containers, dummy = self.client.call_cli_json(*args, **kwargs) + return containers + + def list_containers(self): + result = [] + for container in self.list_containers_raw(): + labels = {} + if container.get('Labels'): + for part in container['Labels'].split(','): + label_value = part.split('=', 1) + labels[label_value[0]] = label_value[1] if len(label_value) > 1 else '' + container['Labels'] = labels + container['Names'] = container.get('Names', container['Name']).split(',') + container['Networks'] = container.get('Networks', '').split(',') + container['Publishers'] = container.get('Publishers') or [] + result.append(container) + return result + + def list_images(self): + args = self.get_base_args() + ['images', '--format', 'json'] + kwargs = dict(cwd=self.project_src, check_rc=True) + dummy, images, dummy = self.client.call_cli_json(*args, **kwargs) + return images + + def parse_events(self, stderr, dry_run=False): + return parse_events(stderr, dry_run=dry_run, warn_function=self.client.warn) + + def emit_warnings(self, events): + emit_warnings(events, warn_function=self.client.warn) + + def update_result(self, result, events, stdout, stderr, ignore_service_pull_events=False): + result['changed'] = result.get('changed', False) or has_changes(events, ignore_service_pull_events=ignore_service_pull_events) + result['actions'] = result.get('actions', []) + extract_actions(events) + result['stdout'] = combine_text_output(result.get('stdout'), to_native(stdout)) + result['stderr'] = combine_text_output(result.get('stderr'), to_native(stderr)) + + def update_failed(self, result, events, args, stdout, stderr, rc): + return update_failed( + result, + events, + args=args, + stdout=stdout, + stderr=stderr, + rc=rc, + cli=self.client.get_cli(), + ) + + def cleanup_result(self, result): + if not 
result.get('failed'): + # Only return stdout and stderr if it's not empty + for res in ('stdout', 'stderr'): + if result.get(res) == '': + result.pop(res) diff --git a/ansible_collections/community/docker/plugins/module_utils/image_archive.py b/ansible_collections/community/docker/plugins/module_utils/image_archive.py index e174631e2..46b5abc14 100644 --- a/ansible_collections/community/docker/plugins/module_utils/image_archive.py +++ b/ansible_collections/community/docker/plugins/module_utils/image_archive.py @@ -23,7 +23,7 @@ class ImageArchiveManifestSummary(object): :param image_id: File name portion of Config entry, e.g. abcde12345 from abcde12345.json :type image_id: str :param repo_tags Docker image names, e.g. ["hello-world:latest"] - :type repo_tags: list + :type repo_tags: list[str] ''' self.image_id = image_id @@ -60,13 +60,13 @@ def api_image_id(archive_image_id): return 'sha256:%s' % archive_image_id -def archived_image_manifest(archive_path): +def load_archived_image_manifest(archive_path): ''' - Attempts to get Image.Id and image name from metadata stored in the image + Attempts to get image IDs and image names from metadata stored in the image archive tar file. - The tar should contain a file "manifest.json" with an array with a single entry, - and the entry should have a Config field with the image ID in its file name, as + The tar should contain a file "manifest.json" with an array with one or more entries, + and every entry should have a Config field with the image ID in its file name, as well as a RepoTags list, which typically has only one entry. :raises: @@ -75,7 +75,7 @@ def archived_image_manifest(archive_path): :param archive_path: Tar file to read :type archive_path: str - :return: None, if no file at archive_path, or the extracted image ID, which will not have a sha256: prefix. + :return: None, if no file at archive_path, or a list of ImageArchiveManifestSummary objects. 
:rtype: ImageArchiveManifestSummary ''' @@ -100,44 +100,51 @@ def archived_image_manifest(archive_path): # In Python 2.6, this does not have __exit__ ef.close() - if len(manifest) != 1: + if len(manifest) == 0: raise ImageArchiveInvalidException( - "Expected to have one entry in manifest.json but found %s" % len(manifest), + "Expected to have at least one entry in manifest.json but found none", None ) - m0 = manifest[0] - - try: - config_file = m0['Config'] - except KeyError as exc: - raise ImageArchiveInvalidException( - "Failed to get Config entry from manifest.json: %s" % to_native(exc), - exc - ) - - # Extracts hash without 'sha256:' prefix - try: - # Strip off .json filename extension, leaving just the hash. - image_id = os.path.splitext(config_file)[0] - except Exception as exc: - raise ImageArchiveInvalidException( - "Failed to extract image id from config file name %s: %s" % (config_file, to_native(exc)), - exc - ) - - try: - repo_tags = m0['RepoTags'] - except KeyError as exc: - raise ImageArchiveInvalidException( - "Failed to get RepoTags entry from manifest.json: %s" % to_native(exc), - exc - ) - - return ImageArchiveManifestSummary( - image_id=image_id, - repo_tags=repo_tags - ) + result = [] + for index, meta in enumerate(manifest): + try: + config_file = meta['Config'] + except KeyError as exc: + raise ImageArchiveInvalidException( + "Failed to get Config entry from {0}th manifest in manifest.json: {1}".format(index + 1, to_native(exc)), + exc + ) + + # Extracts hash without 'sha256:' prefix + try: + # Strip off .json filename extension, leaving just the hash. 
+ image_id = os.path.splitext(config_file)[0] + except Exception as exc: + raise ImageArchiveInvalidException( + "Failed to extract image id from config file name %s: %s" % (config_file, to_native(exc)), + exc + ) + + for prefix in ( + 'blobs/sha256/', # Moby 25.0.0, Docker API 1.44 + ): + if image_id.startswith(prefix): + image_id = image_id[len(prefix):] + + try: + repo_tags = meta['RepoTags'] + except KeyError as exc: + raise ImageArchiveInvalidException( + "Failed to get RepoTags entry from {0}th manifest in manifest.json: {1}".format(index + 1, to_native(exc)), + exc + ) + + result.append(ImageArchiveManifestSummary( + image_id=image_id, + repo_tags=repo_tags + )) + return result except ImageArchiveInvalidException: raise @@ -155,3 +162,33 @@ def archived_image_manifest(archive_path): raise except Exception as exc: raise ImageArchiveInvalidException("Failed to open tar file %s: %s" % (archive_path, to_native(exc)), exc) + + +def archived_image_manifest(archive_path): + ''' + Attempts to get Image.Id and image name from metadata stored in the image + archive tar file. + + The tar should contain a file "manifest.json" with an array with a single entry, + and the entry should have a Config field with the image ID in its file name, as + well as a RepoTags list, which typically has only one entry. + + :raises: + ImageArchiveInvalidException: A file already exists at archive_path, but could not extract an image ID from it. + + :param archive_path: Tar file to read + :type archive_path: str + + :return: None, if no file at archive_path, or the extracted image ID, which will not have a sha256: prefix. 
+ :rtype: ImageArchiveManifestSummary + ''' + + results = load_archived_image_manifest(archive_path) + if results is None: + return None + if len(results) == 1: + return results[0] + raise ImageArchiveInvalidException( + "Expected to have one entry in manifest.json but found %s" % len(results), + None + ) diff --git a/ansible_collections/community/docker/plugins/module_utils/module_container/base.py b/ansible_collections/community/docker/plugins/module_utils/module_container/base.py index 21c29226e..0f776aa5c 100644 --- a/ansible_collections/community/docker/plugins/module_utils/module_container/base.py +++ b/ansible_collections/community/docker/plugins/module_utils/module_container/base.py @@ -19,10 +19,15 @@ from ansible.module_utils.six import string_types from ansible_collections.community.docker.plugins.module_utils.util import ( clean_dict_booleans_for_docker_api, + compare_generic, normalize_healthcheck, omit_none_from_dict, ) +from ansible_collections.community.docker.plugins.module_utils._platform import ( + compare_platform_strings, +) + from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( parse_env_file, ) @@ -67,6 +72,7 @@ class Option(object): not_a_container_option=False, not_an_ansible_option=False, copy_comparison_from=None, + compare=None, ): self.name = name self.type = type @@ -106,6 +112,11 @@ class Option(object): self.not_a_container_option = not_a_container_option self.not_an_ansible_option = not_an_ansible_option self.copy_comparison_from = copy_comparison_from + self.compare = ( + lambda param_value, container_value: compare(self, param_value, container_value) + ) if compare else ( + lambda param_value, container_value: compare_generic(param_value, container_value, self.comparison, self.comparison_type) + ) class OptionGroup(object): @@ -166,17 +177,21 @@ class OptionGroup(object): class Engine(object): min_api_version = None # string or None min_api_version_obj = None # LooseVersion object or None + 
extra_option_minimal_versions = None # dict[str, dict[str, Any]] or None @abc.abstractmethod - def get_value(self, module, container, api_version, options): + def get_value(self, module, container, api_version, options, image, host_info): pass + def compare_value(self, option, param_value, container_value): + return option.compare(param_value, container_value) + @abc.abstractmethod def set_value(self, module, data, api_version, options, values): pass @abc.abstractmethod - def get_expected_values(self, module, client, api_version, options, image, values): + def get_expected_values(self, module, client, api_version, options, image, values, host_info): pass @abc.abstractmethod @@ -199,6 +214,14 @@ class Engine(object): def can_update_value(self, api_version): pass + @abc.abstractmethod + def needs_container_image(self, values): + pass + + @abc.abstractmethod + def needs_host_info(self, values): + pass + class EngineDriver(object): name = None # string @@ -209,6 +232,10 @@ class EngineDriver(object): pass @abc.abstractmethod + def get_host_info(self, client): + pass + + @abc.abstractmethod def get_api_version(self, client): pass @@ -483,6 +510,8 @@ def _preprocess_networks(module, values): parsed_link = (link, link) parsed_links.append(tuple(parsed_link)) network['links'] = parsed_links + if network['mac_address']: + network['mac_address'] = network['mac_address'].replace('-', ':') return values @@ -733,6 +762,15 @@ def _preprocess_ports(module, values): return values +def _compare_platform(option, param_value, container_value): + if option.comparison == 'ignore': + return True + try: + return compare_platform_strings(param_value, container_value) + except ValueError: + return param_value == container_value + + OPTION_AUTO_REMOVE = ( OptionGroup() .add_option('auto_remove', type='bool') @@ -910,7 +948,7 @@ OPTION_HOSTNAME = ( ) OPTION_IMAGE = ( - OptionGroup(preprocess=_preprocess_networks) + OptionGroup() .add_option('image', type='str') ) @@ -984,6 +1022,7 @@ 
OPTION_NETWORK = ( ipv6_address=dict(type='str'), aliases=dict(type='list', elements='str'), links=dict(type='list', elements='str'), + mac_address=dict(type='str'), )) ) @@ -1009,7 +1048,7 @@ OPTION_PIDS_LIMIT = ( OPTION_PLATFORM = ( OptionGroup() - .add_option('platform', type='str') + .add_option('platform', type='str', compare=_compare_platform) ) OPTION_PRIVILEGED = ( diff --git a/ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py b/ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py index cccf72df4..61a5500c9 100644 --- a/ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py +++ b/ansible_collections/community/docker/plugins/module_utils/module_container/docker_api.py @@ -17,6 +17,11 @@ from ansible_collections.community.docker.plugins.module_utils.common_api import RequestException, ) +from ansible_collections.community.docker.plugins.module_utils._platform import ( + compose_platform_string, + normalize_platform_string, +) + from ansible_collections.community.docker.plugins.module_utils.module_container.base import ( OPTION_AUTO_REMOVE, OPTION_BLKIO_WEIGHT, @@ -165,6 +170,8 @@ class DockerAPIEngineDriver(EngineDriver): for option in options.options: if not option.not_an_ansible_option: option_minimal_versions[option.name] = {'docker_api_version': engine.min_api_version} + if engine.extra_option_minimal_versions: + option_minimal_versions.update(engine.extra_option_minimal_versions) active_options.append(options) @@ -181,6 +188,9 @@ class DockerAPIEngineDriver(EngineDriver): return client.module, active_options, client + def get_host_info(self, client): + return client.info() + def get_api_version(self, client): return client.docker_api_version @@ -216,7 +226,7 @@ class DockerAPIEngineDriver(EngineDriver): return client.get_container_by_id(container_id) def inspect_image_by_id(self, client, image_id): - return client.find_image_by_id(image_id) + 
return client.find_image_by_id(image_id, accept_missing_image=True) def inspect_image_by_name(self, client, repository, tag): return client.find_image(repository, tag) @@ -236,7 +246,13 @@ class DockerAPIEngineDriver(EngineDriver): def connect_container_to_network(self, client, container_id, network_id, parameters=None): parameters = (parameters or {}).copy() params = {} - for para, dest_para in {'ipv4_address': 'IPv4Address', 'ipv6_address': 'IPv6Address', 'links': 'Links', 'aliases': 'Aliases'}.items(): + for para, dest_para in { + 'ipv4_address': 'IPv4Address', + 'ipv6_address': 'IPv6Address', + 'links': 'Links', + 'aliases': 'Aliases', + 'mac_address': 'MacAddress', + }.items(): value = parameters.pop(para, None) if value: if para == 'links': @@ -387,18 +403,27 @@ class DockerAPIEngine(Engine): can_set_value=None, can_update_value=None, min_api_version=None, + compare_value=None, + needs_container_image=None, + needs_host_info=None, + extra_option_minimal_versions=None, ): self.min_api_version = min_api_version self.min_api_version_obj = None if min_api_version is None else LooseVersion(min_api_version) self.get_value = get_value self.set_value = set_value - self.get_expected_values = get_expected_values or (lambda module, client, api_version, options, image, values: values) + self.get_expected_values = get_expected_values or (lambda module, client, api_version, options, image, values, host_info: values) self.ignore_mismatching_result = ignore_mismatching_result or \ (lambda module, client, api_version, option, image, container_value, expected_value: False) self.preprocess_value = preprocess_value or (lambda module, client, api_version, options, values: values) self.update_value = update_value self.can_set_value = can_set_value or (lambda api_version: set_value is not None) self.can_update_value = can_update_value or (lambda api_version: update_value is not None) + self.needs_container_image = needs_container_image or (lambda values: False) + 
self.needs_host_info = needs_host_info or (lambda values: False) + if compare_value is not None: + self.compare_value = compare_value + self.extra_option_minimal_versions = extra_option_minimal_versions @classmethod def config_value( @@ -423,7 +448,7 @@ class DockerAPIEngine(Engine): values[options[0].name] = value return values - def get_value(module, container, api_version, options): + def get_value(module, container, api_version, options, image, host_info): if len(options) != 1: raise AssertionError('config_value can only be used for a single option') value = container['Config'].get(config_name, _SENTRY) @@ -435,7 +460,7 @@ class DockerAPIEngine(Engine): get_expected_values_ = None if get_expected_value: - def get_expected_values_(module, client, api_version, options, image, values): + def get_expected_values_(module, client, api_version, options, image, values, host_info): if len(options) != 1: raise AssertionError('host_config_value can only be used for a single option') value = values.get(options[0].name, _SENTRY) @@ -499,7 +524,7 @@ class DockerAPIEngine(Engine): values[options[0].name] = value return values - def get_value(module, container, api_version, options): + def get_value(module, container, api_version, options, get_value, host_info): if len(options) != 1: raise AssertionError('host_config_value can only be used for a single option') value = container['HostConfig'].get(host_config_name, _SENTRY) @@ -511,7 +536,7 @@ class DockerAPIEngine(Engine): get_expected_values_ = None if get_expected_value: - def get_expected_values_(module, client, api_version, options, image, values): + def get_expected_values_(module, client, api_version, options, image, values, host_info): if len(options) != 1: raise AssertionError('host_config_value can only be used for a single option') value = values.get(options[0].name, _SENTRY) @@ -585,7 +610,7 @@ def _get_default_host_ip(module, client): return ip -def _get_value_detach_interactive(module, container, api_version, 
options): +def _get_value_detach_interactive(module, container, api_version, options, image, host_info): attach_stdin = container['Config'].get('OpenStdin') attach_stderr = container['Config'].get('AttachStderr') attach_stdout = container['Config'].get('AttachStdout') @@ -836,7 +861,7 @@ def _get_network_id(module, client, network_name): client.fail("Error getting network id for %s - %s" % (network_name, to_native(exc))) -def _get_values_network(module, container, api_version, options): +def _get_values_network(module, container, api_version, options, image, host_info): value = container['HostConfig'].get('NetworkMode', _SENTRY) if value is _SENTRY: return {} @@ -852,7 +877,7 @@ def _set_values_network(module, data, api_version, options, values): data['HostConfig']['NetworkMode'] = value -def _get_values_mounts(module, container, api_version, options): +def _get_values_mounts(module, container, api_version, options, image, host_info): volumes = container['Config'].get('Volumes') binds = container['HostConfig'].get('Binds') # According to https://github.com/moby/moby/, support for HostConfig.Mounts @@ -916,7 +941,7 @@ def _get_image_binds(volumes): return results -def _get_expected_values_mounts(module, client, api_version, options, image, values): +def _get_expected_values_mounts(module, client, api_version, options, image, values, host_info): expected_values = {} # binds @@ -1017,7 +1042,7 @@ def _set_values_mounts(module, data, api_version, options, values): data['HostConfig']['Binds'] = values['volume_binds'] -def _get_values_log(module, container, api_version, options): +def _get_values_log(module, container, api_version, options, image, host_info): log_config = container['HostConfig'].get('LogConfig') or {} return { 'log_driver': log_config.get('Type'), @@ -1037,18 +1062,50 @@ def _set_values_log(module, data, api_version, options, values): data['HostConfig']['LogConfig'] = log_config -def _get_values_platform(module, container, api_version, options): +def 
_get_values_platform(module, container, api_version, options, image, host_info): +    if image and (image.get('Os') or image.get('Architecture') or image.get('Variant')): +        return { +            'platform': compose_platform_string( +                os=image.get('Os'), +                arch=image.get('Architecture'), +                variant=image.get('Variant'), +                daemon_os=host_info.get('OSType') if host_info else None, +                daemon_arch=host_info.get('Architecture') if host_info else None, +            ) +        } return { 'platform': container.get('Platform'), } +def _get_expected_values_platform(module, client, api_version, options, image, values, host_info): +    expected_values = {} +    if 'platform' in values: +        try: +            expected_values['platform'] = normalize_platform_string( +                values['platform'], +                daemon_os=host_info.get('OSType') if host_info else None, +                daemon_arch=host_info.get('Architecture') if host_info else None, +            ) +        except ValueError as exc: +            module.fail_json(msg='Error while parsing platform parameter: %s' % (to_native(exc), )) +    return expected_values + + def _set_values_platform(module, data, api_version, options, values): if 'platform' in values: data['platform'] = values['platform'] -def _get_values_restart(module, container, api_version, options): +def _needs_container_image_platform(values): +    return 'platform' in values + + +def _needs_host_info_platform(values): +    return 'platform' in values + + +def _get_values_restart(module, container, api_version, options, image, host_info): restart_policy = container['HostConfig'].get('RestartPolicy') or {} return { 'restart_policy': restart_policy.get('Name'), @@ -1077,7 +1134,7 @@ def _update_value_restart(module, data, api_version, options, values): } -def _get_values_ports(module, container, api_version, options): +def _get_values_ports(module, container, api_version, options, image, host_info): host_config = container['HostConfig'] config = container['Config'] @@ -1094,7 +1151,7 @@ def _get_values_ports(module, container, api_version, options): } -def 
_get_expected_values_ports(module, client, api_version, options, image, values): +def _get_expected_values_ports(module, client, api_version, options, image, values, host_info): expected_values = {} if 'published_ports' in values: @@ -1283,6 +1340,12 @@ OPTION_NETWORK.add_engine('docker_api', DockerAPIEngine( get_value=_get_values_network, set_value=_set_values_network, ignore_mismatching_result=_ignore_mismatching_network_result, + extra_option_minimal_versions={ + 'networks.mac_address': { + 'docker_api_version': '1.44', + 'detect_usage': lambda c: any(net_info.get('mac_address') is not None for net_info in (c.module.params['networks'] or [])), + }, + }, )) OPTION_OOM_KILLER.add_engine('docker_api', DockerAPIEngine.host_config_value('OomKillDisable')) @@ -1296,6 +1359,9 @@ OPTION_PIDS_LIMIT.add_engine('docker_api', DockerAPIEngine.host_config_value('Pi OPTION_PLATFORM.add_engine('docker_api', DockerAPIEngine( get_value=_get_values_platform, set_value=_set_values_platform, + get_expected_values=_get_expected_values_platform, + needs_container_image=_needs_container_image_platform, + needs_host_info=_needs_host_info_platform, min_api_version='1.41', )) diff --git a/ansible_collections/community/docker/plugins/module_utils/module_container/module.py b/ansible_collections/community/docker/plugins/module_utils/module_container/module.py index 230dbfb40..5d819efa4 100644 --- a/ansible_collections/community/docker/plugins/module_utils/module_container/module.py +++ b/ansible_collections/community/docker/plugins/module_utils/module_container/module.py @@ -78,6 +78,11 @@ class ContainerManager(DockerBaseClass): self.param_output_logs = self.module.params['output_logs'] self.param_paused = self.module.params['paused'] self.param_pull = self.module.params['pull'] + if self.param_pull is True: + self.param_pull = 'always' + if self.param_pull is False: + self.param_pull = 'missing' + self.param_pull_check_mode_behavior = self.module.params['pull_check_mode_behavior'] 
self.param_recreate = self.module.params['recreate'] self.param_removal_wait_timeout = self.module.params['removal_wait_timeout'] self.param_restart = self.module.params['restart'] @@ -132,7 +137,7 @@ class ContainerManager(DockerBaseClass): self.all_options['image'].comparison = 'ignore' if self.module.params['purge_networks']: self.all_options['networks'].comparison = 'strict' - # Process comparsions specified by user + # Process comparisons specified by user if self.module.params.get('comparisons'): # If '*' appears in comparisons, process it first if '*' in self.module.params['comparisons']: @@ -268,6 +273,20 @@ class ContainerManager(DockerBaseClass): parameters.append((options, values)) return parameters + def _needs_container_image(self): + for options, values in self.parameters: + engine = options.get_engine(self.engine_driver.name) + if engine.needs_container_image(values): + return True + return False + + def _needs_host_info(self): + for options, values in self.parameters: + engine = options.get_engine(self.engine_driver.name) + if engine.needs_host_info(values): + return True + return False + def present(self, state): self.parameters = self._collect_params(self.options) container = self._get_container(self.param_name) @@ -280,8 +299,10 @@ class ContainerManager(DockerBaseClass): # the container already runs or not; in the former case, in case the # container needs to be restarted, we use the existing container's # image ID. 
- image, comparison_image = self._get_image(container) + image, container_image, comparison_image = self._get_image( + container, needs_container_image=self._needs_container_image()) self.log(image, pretty_print=True) + host_info = self.engine_driver.get_host_info(self.client) if self._needs_host_info() else None if not container.exists or container.removing: # New container if container.removing: @@ -301,13 +322,24 @@ class ContainerManager(DockerBaseClass): container_created = True else: # Existing container - different, differences = self.has_different_configuration(container, comparison_image) + different, differences = self.has_different_configuration(container, container_image, comparison_image, host_info) image_different = False if self.all_options['image'].comparison == 'strict': image_different = self._image_is_different(image, container) - if self.param_image_name_mismatch == 'recreate' and self.param_image is not None and self.param_image != container.image_name: - different = True - self.diff_tracker.add('image_name', parameter=self.param_image, active=container.image_name) + if self.param_image_name_mismatch != 'ignore' and self.param_image is not None and self.param_image != container.image_name: + if self.param_image_name_mismatch == 'recreate': + different = True + self.diff_tracker.add('image_name', parameter=self.param_image, active=container.image_name) + else: + # The default has been deprecated! + self.module.deprecate( + 'The default value "ignore" for image_name_mismatch has been deprecated and will change to "recreate"' + ' in community.docker 4.0.0. 
In the current situation, this would cause the container to be recreated' + ' since the current container\'s image name "{active}" does not match the desired image name "{parameter}".'.format( + parameter=self.param_image, active=container.image_name), + version='4.0.0', + collection_name='community.docker', + ) if image_different or different or self.param_recreate: self.diff_tracker.merge(differences) self.diff['differences'] = differences.get_legacy_docker_container_diffs() @@ -333,7 +365,7 @@ class ContainerManager(DockerBaseClass): comparison_image = image if container and container.exists: - container = self.update_limits(container, comparison_image) + container = self.update_limits(container, container_image, comparison_image, host_info) container = self.update_networks(container, container_created) if state == 'started' and not container.running: @@ -398,45 +430,58 @@ class ContainerManager(DockerBaseClass): image = self.engine_driver.inspect_image_by_name(self.client, repository, tag) return image or fallback - def _get_image(self, container): + def _get_image(self, container, needs_container_image=False): image_parameter = self.param_image + get_container_image = needs_container_image or not image_parameter + container_image = self._get_container_image(container) if get_container_image else None + if container_image: + self.log("current image") + self.log(container_image, pretty_print=True) if not image_parameter: self.log('No image specified') - return None, self._get_container_image(container) + return None, container_image, container_image if is_image_name_id(image_parameter): image = self.engine_driver.inspect_image_by_id(self.client, image_parameter) + if image is None: + self.client.fail("Cannot find image with ID %s" % (image_parameter, )) else: repository, tag = parse_repository_tag(image_parameter) if not tag: tag = "latest" image = self.engine_driver.inspect_image_by_name(self.client, repository, tag) - if not image or self.param_pull: + if not 
image and self.param_pull == "never": + self.client.fail("Cannot find image with name %s:%s, and pull=never" % (repository, tag)) + if not image or self.param_pull == "always": if not self.check_mode: self.log("Pull the image.") image, alreadyToLatest = self.engine_driver.pull_image( self.client, repository, tag, platform=self.module.params['platform']) if alreadyToLatest: self.results['changed'] = False + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag), changed=False)) else: self.results['changed'] = True - self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) - elif not image: - # If the image isn't there, claim we'll pull. - # (Implicitly: if the image is there, claim it already was latest.) + self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag), changed=True)) + elif not image or self.param_pull_check_mode_behavior == 'always': + # If the image isn't there, or pull_check_mode_behavior == 'always', claim we'll + # pull. (Implicitly: if the image is there, claim it already was latest unless + # pull_check_mode_behavior == 'always'.) 
self.results['changed'] = True - self.results['actions'].append(dict(pulled_image="%s:%s" % (repository, tag))) + action = dict(pulled_image="%s:%s" % (repository, tag)) + if not image: + action['changed'] = True + self.results['actions'].append(action) self.log("image") self.log(image, pretty_print=True) comparison_image = image if self.param_image_comparison == 'current-image': - comparison_image = self._get_container_image(container, image) - if comparison_image != image: - self.log("current image") - self.log(comparison_image, pretty_print=True) + if not get_container_image: + container_image = self._get_container_image(container) + comparison_image = container_image - return image, comparison_image + return image, container_image, comparison_image def _image_is_different(self, image, container): if image and image.get('Id'): @@ -455,15 +500,16 @@ class ContainerManager(DockerBaseClass): params['Image'] = image return params - def _record_differences(self, differences, options, param_values, engine, container, image): - container_values = engine.get_value(self.module, container.raw, self.engine_driver.get_api_version(self.client), options.options) + def _record_differences(self, differences, options, param_values, engine, container, container_image, image, host_info): + container_values = engine.get_value( + self.module, container.raw, self.engine_driver.get_api_version(self.client), options.options, container_image, host_info) expected_values = engine.get_expected_values( - self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, image, param_values.copy()) + self.module, self.client, self.engine_driver.get_api_version(self.client), options.options, image, param_values.copy(), host_info) for option in options.options: if option.name in expected_values: param_value = expected_values[option.name] container_value = container_values.get(option.name) - match = compare_generic(param_value, container_value, option.comparison, 
option.comparison_type) + match = engine.compare_value(option, param_value, container_value) if not match: # No match. @@ -497,28 +543,28 @@ class ContainerManager(DockerBaseClass): c = sorted(c, key=sort_key_fn) differences.add(option.name, parameter=p, active=c) - def has_different_configuration(self, container, image): + def has_different_configuration(self, container, container_image, image, host_info): differences = DifferenceTracker() update_differences = DifferenceTracker() for options, param_values in self.parameters: engine = options.get_engine(self.engine_driver.name) if engine.can_update_value(self.engine_driver.get_api_version(self.client)): - self._record_differences(update_differences, options, param_values, engine, container, image) + self._record_differences(update_differences, options, param_values, engine, container, container_image, image, host_info) else: - self._record_differences(differences, options, param_values, engine, container, image) + self._record_differences(differences, options, param_values, engine, container, container_image, image, host_info) has_differences = not differences.empty # Only consider differences of properties that can be updated when there are also other differences if has_differences: differences.merge(update_differences) return has_differences, differences - def has_different_resource_limits(self, container, image): + def has_different_resource_limits(self, container, container_image, image, host_info): differences = DifferenceTracker() for options, param_values in self.parameters: engine = options.get_engine(self.engine_driver.name) if not engine.can_update_value(self.engine_driver.get_api_version(self.client)): continue - self._record_differences(differences, options, param_values, engine, container, image) + self._record_differences(differences, options, param_values, engine, container, container_image, image, host_info) has_differences = not differences.empty return has_differences, differences @@ -531,8 +577,8 
@@ class ContainerManager(DockerBaseClass): engine.update_value(self.module, result, self.engine_driver.get_api_version(self.client), options.options, values) return result - def update_limits(self, container, image): - limits_differ, different_limits = self.has_different_resource_limits(container, image) + def update_limits(self, container, container_image, image, host_info): + limits_differ, different_limits = self.has_different_resource_limits(container, container_image, image, host_info) if limits_differ: self.log("limit differences:") self.log(different_limits.get_legacy_docker_container_diffs(), pretty_print=True) @@ -580,6 +626,8 @@ class ContainerManager(DockerBaseClass): expected_links.append("%s:%s" % (link, alias)) if not compare_generic(expected_links, network_info.get('Links'), 'allow_more_present', 'set'): diff = True + if network.get('mac_address') and network['mac_address'] != network_info.get('MacAddress'): + diff = True if diff: different = True differences.append(dict( @@ -589,7 +637,8 @@ class ContainerManager(DockerBaseClass): ipv4_address=network_info_ipam.get('IPv4Address'), ipv6_address=network_info_ipam.get('IPv6Address'), aliases=network_info.get('Aliases'), - links=network_info.get('Links') + links=network_info.get('Links'), + mac_address=network_info.get('MacAddress'), ) )) return different, differences @@ -816,14 +865,15 @@ def run_module(engine_driver): image=dict(type='str'), image_comparison=dict(type='str', choices=['desired-image', 'current-image'], default='desired-image'), image_label_mismatch=dict(type='str', choices=['ignore', 'fail'], default='ignore'), - image_name_mismatch=dict(type='str', choices=['ignore', 'recreate'], default='ignore'), + image_name_mismatch=dict(type='str', choices=['ignore', 'recreate']), keep_volumes=dict(type='bool', default=True), kill_signal=dict(type='str'), name=dict(type='str', required=True), networks_cli_compatible=dict(type='bool', default=True), output_logs=dict(type='bool', default=False), 
paused=dict(type='bool'), - pull=dict(type='bool', default=False), + pull=dict(type='raw', choices=['never', 'missing', 'always', True, False], default='missing'), + pull_check_mode_behavior=dict(type='str', choices=['image_not_present', 'always'], default='image_not_present'), purge_networks=dict(type='bool', default=False, removed_in_version='4.0.0', removed_from_collection='community.docker'), recreate=dict(type='bool', default=False), removal_wait_timeout=dict(type='float'), diff --git a/ansible_collections/community/docker/plugins/module_utils/util.py b/ansible_collections/community/docker/plugins/module_utils/util.py index 9c6b738c6..efd3301f1 100644 --- a/ansible_collections/community/docker/plugins/module_utils/util.py +++ b/ansible_collections/community/docker/plugins/module_utils/util.py @@ -6,6 +6,7 @@ from __future__ import (absolute_import, division, print_function) __metaclass__ = type +import json import re from datetime import timedelta @@ -14,7 +15,7 @@ from ansible.module_utils.common.collections import is_sequence from ansible.module_utils.six.moves.urllib.parse import urlparse -DEFAULT_DOCKER_HOST = 'unix://var/run/docker.sock' +DEFAULT_DOCKER_HOST = 'unix:///var/run/docker.sock' DEFAULT_TLS = False DEFAULT_TLS_VERIFY = False DEFAULT_TLS_HOSTNAME = 'localhost' # deprecated @@ -25,7 +26,7 @@ DOCKER_COMMON_ARGS = dict( tls_hostname=dict(type='str', fallback=(env_fallback, ['DOCKER_TLS_HOSTNAME'])), api_version=dict(type='str', default='auto', fallback=(env_fallback, ['DOCKER_API_VERSION']), aliases=['docker_api_version']), timeout=dict(type='int', default=DEFAULT_TIMEOUT_SECONDS, fallback=(env_fallback, ['DOCKER_TIMEOUT'])), - ca_cert=dict(type='path', aliases=['tls_ca_cert', 'cacert_path']), + ca_path=dict(type='path', aliases=['ca_cert', 'tls_ca_cert', 'cacert_path']), client_cert=dict(type='path', aliases=['tls_client_cert', 'cert_path']), client_key=dict(type='path', aliases=['tls_client_key', 'key_path']), ssl_version=dict(type='str', 
fallback=(env_fallback, ['DOCKER_SSL_VERSION'])), @@ -85,6 +86,19 @@ def sanitize_result(data): return data +def log_debug(msg, pretty_print=False): + """Write a log message to docker.log. + + If ``pretty_print=True``, the message will be pretty-printed as JSON. + """ + with open('docker.log', 'a') as log_file: + if pretty_print: + log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': '))) + log_file.write(u'\n') + else: + log_file.write(msg + u'\n') + + class DockerBaseClass(object): def __init__(self): self.debug = False @@ -92,12 +106,7 @@ class DockerBaseClass(object): def log(self, msg, pretty_print=False): pass # if self.debug: - # log_file = open('docker.log', 'a') - # if pretty_print: - # log_file.write(json.dumps(msg, sort_keys=True, indent=4, separators=(',', ': '))) - # log_file.write(u'\n') - # else: - # log_file.write(msg + u'\n') + # log_debug(msg, pretty_print=pretty_print) def update_tls_hostname(result, old_behavior=False, deprecate_function=None, uses_tls=True): diff --git a/ansible_collections/community/docker/plugins/modules/current_container_facts.py b/ansible_collections/community/docker/plugins/modules/current_container_facts.py index f2cde2b59..2daf60bb0 100644 --- a/ansible_collections/community/docker/plugins/modules/current_container_facts.py +++ b/ansible_collections/community/docker/plugins/modules/current_container_facts.py @@ -58,10 +58,10 @@ ansible_facts: description: - The detected container environment. - Contains an empty string if no container was detected, or a non-empty string identifying the container environment. - - C(docker) indicates that the module ran inside a regular Docker container. - - C(azure_pipelines) indicates that the module ran on Azure Pipelines. This seems to no longer be reported. - - C(github_actions) indicates that the module ran inside a Docker container on GitHub Actions. It is supported since community.docker 2.4.0. 
- - C(podman) indicates that the module ran inside a regular Podman container. It is supported since community.docker 3.3.0. + - V(docker) indicates that the module ran inside a regular Docker container. + - V(azure_pipelines) indicates that the module ran on Azure Pipelines. This seems to no longer be reported. + - V(github_actions) indicates that the module ran inside a Docker container on GitHub Actions. It is supported since community.docker 2.4.0. + - V(podman) indicates that the module ran inside a regular Podman container. It is supported since community.docker 3.3.0. returned: always type: str choices: diff --git a/ansible_collections/community/docker/plugins/modules/docker_compose.py b/ansible_collections/community/docker/plugins/modules/docker_compose.py index 01db6a22f..f8edbee4b 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_compose.py +++ b/ansible_collections/community/docker/plugins/modules/docker_compose.py @@ -12,13 +12,14 @@ DOCUMENTATION = ''' module: docker_compose -short_description: Manage multi-container Docker applications with Docker Compose. +short_description: Manage multi-container Docker applications with Docker Compose V1 author: "Chris Houseknecht (@chouseknecht)" description: - Uses Docker Compose to start, shutdown and scale services. B(This module requires docker-compose < 2.0.0.) - - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the I(definition) option. + Use the M(community.docker.docker_compose_v2) module for using the modern Docker compose CLI plugin. + - Configuration can be read from a C(docker-compose.yml) or C(docker-compose.yaml) file or inline using the O(definition) option. - See the examples for more details. - Supports check mode. - This module was called C(docker_service) before Ansible 2.8. The usage did not change. 
@@ -39,27 +40,27 @@ options: project_src: description: - Path to a directory containing a C(docker-compose.yml) or C(docker-compose.yaml) file. - - Mutually exclusive with I(definition). - - Required when no I(definition) is provided. + - Mutually exclusive with O(definition). + - Required when no O(definition) is provided. type: path project_name: description: - - Provide a project name. If not provided, the project name is taken from the basename of I(project_src). - - Required when I(definition) is provided. + - Provide a project name. If not provided, the project name is taken from the basename of O(project_src). + - Required when O(definition) is provided. type: str env_file: description: - - By default environment files are loaded from a C(.env) file located directly under the I(project_src) directory. - - I(env_file) can be used to specify the path of a custom environment file instead. - - The path is relative to the I(project_src) directory. + - By default environment files are loaded from a C(.env) file located directly under the O(project_src) directory. + - O(env_file) can be used to specify the path of a custom environment file instead. + - The path is relative to the O(project_src) directory. - Requires C(docker-compose) version 1.25.0 or greater. - "Note: C(docker-compose) versions C(<=1.28) load the env file from the current working directory of the - C(docker-compose) command rather than I(project_src)." + C(docker-compose) command rather than O(project_src)." type: path version_added: 1.9.0 files: description: - - List of Compose file names relative to I(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml). + - List of Compose file names relative to O(project_src). Overrides C(docker-compose.yml) or C(docker-compose.yaml). - Files are loaded and merged in the order given. type: list elements: path @@ -74,9 +75,9 @@ options: state: description: - Desired state of the project. 
- - Specifying C(present) is the same as running C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart) - (with I(restarted)). - - Specifying C(absent) is the same as running C(docker-compose down). + - Specifying V(present) is the same as running C(docker-compose up) resp. C(docker-compose stop) (with O(stopped=true)) resp. C(docker-compose restart) + (with O(restarted=true)). + - Specifying V(absent) is the same as running C(docker-compose down). type: str default: present choices: @@ -84,25 +85,25 @@ options: - present services: description: - - When I(state) is C(present) run C(docker-compose up) resp. C(docker-compose stop) (with I(stopped)) resp. C(docker-compose restart) (with I(restarted)) - on a subset of services. - - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline I(definition)). + - When O(state) is V(present) run C(docker-compose up) resp. C(docker-compose stop) (with O(stopped=true)) resp. + C(docker-compose restart) (with O(restarted=true)) on a subset of services. + - If empty, which is the default, the operation will be performed on all services defined in the Compose file (or inline O(definition)). type: list elements: str scale: description: - - When I(state) is C(present) scale services. Provide a dictionary of key/value pairs where the key + - When O(state) is V(present) scale services. Provide a dictionary of key/value pairs where the key is the name of the service and the value is an integer count for the number of containers. type: dict dependencies: description: - - When I(state) is C(present) specify whether or not to include linked services. + - When O(state) is V(present) specify whether or not to include linked services. type: bool default: true definition: description: - Compose file describing one or more services, networks and volumes. - - Mutually exclusive with I(project_src) and I(files). 
+ - Mutually exclusive with O(project_src) and O(files). type: dict hostname_check: description: @@ -112,8 +113,8 @@ options: recreate: description: - By default containers will be recreated when their configuration differs from the service definition. - - Setting to C(never) ignores configuration differences and leaves existing containers unchanged. - - Setting to C(always) forces recreation of all existing containers. + - Setting to V(never) ignores configuration differences and leaves existing containers unchanged. + - Setting to V(always) forces recreation of all existing containers. type: str default: smart choices: @@ -122,49 +123,49 @@ options: - smart build: description: - - Use with I(state) C(present) to always build images prior to starting the application. + - Use with O(state=present) to always build images prior to starting the application. - Same as running C(docker-compose build) with the pull option. - Images will only be rebuilt if Docker detects a change in the Dockerfile or build directory contents. - - Use the I(nocache) option to ignore the image cache when performing the build. - - If an existing image is replaced, services using the image will be recreated unless I(recreate) is C(never). + - Use the O(nocache) option to ignore the image cache when performing the build. + - If an existing image is replaced, services using the image will be recreated unless O(recreate=never). type: bool default: false pull: description: - - Use with I(state) C(present) to always pull images prior to starting the application. + - Use with O(state=present) to always pull images prior to starting the application. - Same as running C(docker-compose pull). - - When a new image is pulled, services using the image will be recreated unless I(recreate) is C(never). + - When a new image is pulled, services using the image will be recreated unless O(recreate=never). 
type: bool default: false nocache: description: - - Use with the I(build) option to ignore the cache during the image build process. + - Use with the O(build) option to ignore the cache during the image build process. type: bool default: false remove_images: description: - - Use with I(state) C(absent) to remove all images or only local images. + - Use with O(state=absent) to remove all images or only local images. type: str choices: - 'all' - 'local' remove_volumes: description: - - Use with I(state) C(absent) to remove data volumes. + - Use with O(state=absent) to remove data volumes. type: bool default: false stopped: description: - - Use with I(state) C(present) to stop all containers defined in the Compose file. - - If I(services) is defined, only the containers listed there will be stopped. + - Use with O(state=present) to stop all containers defined in the Compose file. + - If O(services) is defined, only the containers listed there will be stopped. - Requires C(docker-compose) version 1.17.0 or greater for full support. For older versions, the services will first be started and then stopped when the service is supposed to be created as stopped. type: bool default: false restarted: description: - - Use with I(state) C(present) to restart all containers defined in the Compose file. - - If I(services) is defined, only the containers listed there will be restarted. + - Use with O(state=present) to restart all containers defined in the Compose file. + - If O(services) is defined, only the containers listed there will be restarted. type: bool default: false remove_orphans: @@ -175,8 +176,8 @@ options: timeout: description: - Timeout in seconds for container shutdown when attached or when containers are already running. - - By default C(compose) will use a C(10s) timeout unless C(default_grace_period) is defined for a - particular service in the I(project_src). 
+ - By default C(docker-compose) will use a V(10) seconds timeout unless C(default_grace_period) is defined for a + particular service in the O(project_src). type: int default: null use_ssh_client: @@ -184,10 +185,13 @@ options: - Currently ignored for this module, but might suddenly be supported later on. requirements: - - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0" + - "L(Docker SDK for Python,https://docker-py.readthedocs.io/en/stable/) >= 1.8.0, < 7. Docker SDK for Python 7+ is incompatible to docker-compose v1." - "docker-compose >= 1.7.0, < 2.0.0" - "Docker API >= 1.25" - "PyYAML >= 3.11" + +seealso: + - module: community.docker.docker_compose_v2 ''' EXAMPLES = ''' @@ -236,7 +240,7 @@ EXAMPLES = ''' ansible.builtin.debug: var: output - - name: Verify that web and db services are running + - name: Verify that web and db services are not running ansible.builtin.assert: that: - "not output.services.web.flask_web_1.state.running" @@ -440,7 +444,7 @@ services: actions: description: Provides the actions to be taken on each service as determined by compose. 
- returned: when in check mode or I(debug) is C(true) + returned: when in check mode or O(debug=true) type: complex contains: service_name: diff --git a/ansible_collections/community/docker/plugins/modules/docker_compose_v2.py b/ansible_collections/community/docker/plugins/modules/docker_compose_v2.py new file mode 100644 index 000000000..29bb81ad9 --- /dev/null +++ b/ansible_collections/community/docker/plugins/modules/docker_compose_v2.py @@ -0,0 +1,638 @@ +#!/usr/bin/python +# +# Copyright (c) 2023, Felix Fontein <felix@fontein.de> +# Copyright (c) 2023, Léo El Amri (@lel-amri) +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: docker_compose_v2 + +short_description: Manage multi-container Docker applications with Docker Compose CLI plugin + +version_added: 3.6.0 + +description: + - Uses Docker Compose to start or shutdown services. + +extends_documentation_fragment: + - community.docker.compose_v2 + - community.docker.compose_v2.minimum_version + - community.docker.docker.cli_documentation + - community.docker.attributes + - community.docker.attributes.actiongroup_docker + +attributes: + check_mode: + support: full + details: + - In check mode, pulling the image does not result in a changed result. + diff_mode: + support: none + +options: + state: + description: + - Desired state of the project. + - V(present) is equivalent to running C(docker compose up). + - V(stopped) is equivalent to running C(docker compose stop). + - V(absent) is equivalent to running C(docker compose down). + - V(restarted) is equivalent to running C(docker compose restart). 
+ type: str + default: present + choices: + - absent + - stopped + - restarted + - present + pull: + description: + - Whether to pull images before running. This is used when C(docker compose up) is run. + - V(always) ensures that the images are always pulled, even when already present on the Docker daemon. + - V(missing) only pulls them when they are not present on the Docker daemon. + - V(never) never pulls images. If they are not present, the module will fail when trying to create the containers that need them. + - V(policy) use the Compose file's C(pull_policy) defined for the service to figure out what to do. + type: str + choices: + - always + - missing + - never + - policy + default: policy + build: + description: + - Whether to build images before starting containers. This is used when C(docker compose up) is run. + - V(always) always builds before starting containers. This is equivalent to the C(--build) option of C(docker compose up). + - V(never) never builds before starting containers. This is equivalent to the C(--no-build) option of C(docker compose up). + - V(policy) uses the policy as defined in the Compose file. + type: str + choices: + - always + - never + - policy + default: policy + dependencies: + description: + - When O(state) is V(present) or V(restarted), specify whether or not to include linked services. + type: bool + default: true + recreate: + description: + - By default containers will be recreated when their configuration differs from the service definition. + - Setting to V(never) ignores configuration differences and leaves existing containers unchanged. + - Setting to V(always) forces recreation of all existing containers. + type: str + default: auto + choices: + - always + - never + - auto + remove_images: + description: + - Use with O(state=absent) to remove all images or only local images. + type: str + choices: + - all + - local + remove_volumes: + description: + - Use with O(state=absent) to remove data volumes. 
+ type: bool + default: false + remove_orphans: + description: + - Remove containers for services not defined in the Compose file. + type: bool + default: false + timeout: + description: + - Timeout in seconds for container shutdown when attached or when containers are already running. + type: int + services: + description: + - Specifies a subset of services to be targeted. + type: list + elements: str + scale: + description: + - Define how to scale services when running C(docker compose up). + - Provide a dictionary of key/value pairs where the key is the name of the service + and the value is an integer count for the number of containers. + type: dict + version_added: 3.7.0 + wait: + description: + - When running C(docker compose up), pass C(--wait) to wait for services to be running/healthy. + - A timeout can be set with the O(wait_timeout) option. + type: bool + default: false + version_added: 3.8.0 + wait_timeout: + description: + - When O(wait=true), wait at most this amount of seconds. + type: int + version_added: 3.8.0 + +author: + - Felix Fontein (@felixfontein) + +seealso: + - module: community.docker.docker_compose + - module: community.docker.docker_compose_v2_pull +''' + +EXAMPLES = ''' +# Examples use the django example at https://docs.docker.com/compose/django. 
Follow it to create the +# flask directory + +- name: Run using a project directory + hosts: localhost + gather_facts: false + tasks: + - name: Tear down existing services + community.docker.docker_compose_v2: + project_src: flask + state: absent + + - name: Create and start services + community.docker.docker_compose_v2: + project_src: flask + register: output + + - name: Show results + ansible.builtin.debug: + var: output + + - name: Run `docker-compose up` again + community.docker.docker_compose_v2: + project_src: flask + register: output + + - name: Show results + ansible.builtin.debug: + var: output + + - ansible.builtin.assert: + that: not output.changed + + - name: Stop all services + community.docker.docker_compose_v2: + project_src: flask + state: stopped + register: output + + - name: Show results + ansible.builtin.debug: + var: output + + - name: Verify that web and db services are not running + ansible.builtin.assert: + that: + - "not output.services.web.flask_web_1.state.running" + - "not output.services.db.flask_db_1.state.running" + + - name: Restart services + community.docker.docker_compose_v2: + project_src: flask + state: restarted + register: output + + - name: Show results + ansible.builtin.debug: + var: output + + - name: Verify that web and db services are running + ansible.builtin.assert: + that: + - "output.services.web.flask_web_1.state.running" + - "output.services.db.flask_db_1.state.running" +''' + +RETURN = ''' +containers: + description: + - A list of containers associated to the service. + returned: success + type: list + elements: dict + contains: + Command: + description: + - The container's command. + type: raw + CreatedAt: + description: + - The timestamp when the container was created. + type: str + sample: "2024-01-02 12:20:41 +0100 CET" + ExitCode: + description: + - The container's exit code. + type: int + Health: + description: + - The container's health check. + type: raw + ID: + description: + - The container's ID. 
+ type: str + sample: "44a7d607219a60b7db0a4817fb3205dce46e91df2cb4b78a6100b6e27b0d3135" + Image: + description: + - The container's image. + type: str + Labels: + description: + - Labels for this container. + type: dict + LocalVolumes: + description: + - The local volumes count. + type: str + Mounts: + description: + - Mounts. + type: str + Name: + description: + - The container's primary name. + type: str + Names: + description: + - List of names of the container. + type: list + elements: str + Networks: + description: + - List of networks attached to this container. + type: list + elements: str + Ports: + description: + - List of port assignments as a string. + type: str + Publishers: + description: + - List of port assignments. + type: list + elements: dict + contains: + URL: + description: + - Interface the port is bound to. + type: str + TargetPort: + description: + - The container's port the published port maps to. + type: int + PublishedPort: + description: + - The port that is published. + type: int + Protocol: + description: + - The protocol. + type: str + choices: + - tcp + - udp + RunningFor: + description: + - Amount of time the container runs. + type: str + Service: + description: + - The name of the service. + type: str + Size: + description: + - The container's size. + type: str + sample: "0B" + State: + description: + - The container's state. + type: str + sample: running + Status: + description: + - The container's status. + type: str + sample: Up About a minute +images: + description: + - A list of images associated to the service. + returned: success + type: list + elements: dict + contains: + ID: + description: + - The image's ID. + type: str + sample: sha256:c8bccc0af9571ec0d006a43acb5a8d08c4ce42b6cc7194dd6eb167976f501ef1 + ContainerName: + description: + - Name of the container this image is used by. + type: str + Repository: + description: + - The repository where this image belongs to.
+ type: str + Tag: + description: + - The tag of the image. + type: str + Size: + description: + - The image's size in bytes. + type: int +actions: + description: + - A list of actions that have been applied. + returned: success + type: list + elements: dict + contains: + what: + description: + - What kind of resource was changed. + type: str + sample: container + choices: + - container + - image + - network + - service + - unknown + - volume + id: + description: + - The ID of the resource that was changed. + type: str + sample: container + status: + description: + - The status change that happened. + type: str + sample: Creating + choices: + - Starting + - Exiting + - Restarting + - Creating + - Stopping + - Killing + - Removing + - Recreating + - Pulling + - Building +''' + +import traceback + +from ansible.module_utils.common.validation import check_type_int +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.six import string_types + +from ansible_collections.community.docker.plugins.module_utils.common_cli import ( + AnsibleModuleDockerClient, + DockerException, +) + +from ansible_collections.community.docker.plugins.module_utils.compose_v2 import ( + BaseComposeManager, + common_compose_argspec, + is_failed, +) + + +class ServicesManager(BaseComposeManager): + def __init__(self, client): + super(ServicesManager, self).__init__(client) + parameters = self.client.module.params + + self.state = parameters['state'] + self.dependencies = parameters['dependencies'] + self.pull = parameters['pull'] + self.build = parameters['build'] + self.recreate = parameters['recreate'] + self.remove_images = parameters['remove_images'] + self.remove_volumes = parameters['remove_volumes'] + self.remove_orphans = parameters['remove_orphans'] + self.timeout = parameters['timeout'] + self.services = parameters['services'] or [] + self.scale = parameters['scale'] or {} + self.wait = parameters['wait'] + self.wait_timeout = 
parameters['wait_timeout'] + + for key, value in self.scale.items(): + if not isinstance(key, string_types): + self.client.fail('The key %s for `scale` is not a string' % repr(key)) + try: + value = check_type_int(value) + except TypeError as exc: + self.client.fail('The value %s for `scale[%s]` is not an integer' % (repr(value), repr(key))) + if value < 0: + self.client.fail('The value %s for `scale[%s]` is negative' % (repr(value), repr(key))) + self.scale[key] = value + + def run(self): + if self.state == 'present': + result = self.cmd_up() + elif self.state == 'stopped': + result = self.cmd_stop() + elif self.state == 'restarted': + result = self.cmd_restart() + elif self.state == 'absent': + result = self.cmd_down() + + result['containers'] = self.list_containers() + result['images'] = self.list_images() + self.cleanup_result(result) + return result + + def get_up_cmd(self, dry_run, no_start=False): + args = self.get_base_args() + ['up', '--detach', '--no-color', '--quiet-pull'] + if self.pull != 'policy': + args.extend(['--pull', self.pull]) + if self.remove_orphans: + args.append('--remove-orphans') + if self.recreate == 'always': + args.append('--force-recreate') + if self.recreate == 'never': + args.append('--no-recreate') + if not self.dependencies: + args.append('--no-deps') + if self.timeout is not None: + args.extend(['--timeout', '%d' % self.timeout]) + if self.build == 'always': + args.append('--build') + elif self.build == 'never': + args.append('--no-build') + for key, value in sorted(self.scale.items()): + args.extend(['--scale', '%s=%d' % (key, value)]) + if self.wait: + args.append('--wait') + if self.wait_timeout is not None: + args.extend(['--wait-timeout', str(self.wait_timeout)]) + if no_start: + args.append('--no-start') + if dry_run: + args.append('--dry-run') + for service in self.services: + args.append(service) + args.append('--') + return args + + def cmd_up(self): + result = dict() + args = self.get_up_cmd(self.check_mode) + rc, 
stdout, stderr = self.client.call_cli(*args, cwd=self.project_src) + events = self.parse_events(stderr, dry_run=self.check_mode) + self.emit_warnings(events) + self.update_result(result, events, stdout, stderr, ignore_service_pull_events=True) + self.update_failed(result, events, args, stdout, stderr, rc) + return result + + def get_stop_cmd(self, dry_run): + args = self.get_base_args() + ['stop'] + if self.timeout is not None: + args.extend(['--timeout', '%d' % self.timeout]) + if dry_run: + args.append('--dry-run') + for service in self.services: + args.append(service) + args.append('--') + return args + + def _are_containers_stopped(self): + for container in self.list_containers_raw(): + if container['State'] not in ('created', 'exited', 'stopped', 'killed'): + return False + return True + + def cmd_stop(self): + # Since 'docker compose stop' **always** claims its stopping containers, even if they are already + # stopped, we have to do this a bit more complicated. + + result = dict() + # Make sure all containers are created + args_1 = self.get_up_cmd(self.check_mode, no_start=True) + rc_1, stdout_1, stderr_1 = self.client.call_cli(*args_1, cwd=self.project_src) + events_1 = self.parse_events(stderr_1, dry_run=self.check_mode) + self.emit_warnings(events_1) + self.update_result(result, events_1, stdout_1, stderr_1, ignore_service_pull_events=True) + is_failed_1 = is_failed(events_1, rc_1) + if not is_failed_1 and not self._are_containers_stopped(): + # Make sure all containers are stopped + args_2 = self.get_stop_cmd(self.check_mode) + rc_2, stdout_2, stderr_2 = self.client.call_cli(*args_2, cwd=self.project_src) + events_2 = self.parse_events(stderr_2, dry_run=self.check_mode) + self.emit_warnings(events_2) + self.update_result(result, events_2, stdout_2, stderr_2) + else: + args_2 = [] + rc_2, stdout_2, stderr_2 = 0, b'', b'' + events_2 = [] + # Compose result + self.update_failed( + result, + events_1 + events_2, + args_1 if is_failed_1 else args_2, + stdout_1 
if is_failed_1 else stdout_2, + stderr_1 if is_failed_1 else stderr_2, + rc_1 if is_failed_1 else rc_2, + ) + return result + + def get_restart_cmd(self, dry_run): + args = self.get_base_args() + ['restart'] + if not self.dependencies: + args.append('--no-deps') + if self.timeout is not None: + args.extend(['--timeout', '%d' % self.timeout]) + if dry_run: + args.append('--dry-run') + for service in self.services: + args.append(service) + args.append('--') + return args + + def cmd_restart(self): + result = dict() + args = self.get_restart_cmd(self.check_mode) + rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src) + events = self.parse_events(stderr, dry_run=self.check_mode) + self.emit_warnings(events) + self.update_result(result, events, stdout, stderr) + self.update_failed(result, events, args, stdout, stderr, rc) + return result + + def get_down_cmd(self, dry_run): + args = self.get_base_args() + ['down'] + if self.remove_orphans: + args.append('--remove-orphans') + if self.remove_images: + args.extend(['--rmi', self.remove_images]) + if self.remove_volumes: + args.append('--volumes') + if self.timeout is not None: + args.extend(['--timeout', '%d' % self.timeout]) + if dry_run: + args.append('--dry-run') + for service in self.services: + args.append(service) + args.append('--') + return args + + def cmd_down(self): + result = dict() + args = self.get_down_cmd(self.check_mode) + rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src) + events = self.parse_events(stderr, dry_run=self.check_mode) + self.emit_warnings(events) + self.update_result(result, events, stdout, stderr) + self.update_failed(result, events, args, stdout, stderr, rc) + return result + + +def main(): + argument_spec = dict( + state=dict(type='str', default='present', choices=['absent', 'present', 'stopped', 'restarted']), + dependencies=dict(type='bool', default=True), + pull=dict(type='str', choices=['always', 'missing', 'never', 'policy'], default='policy'), + 
build=dict(type='str', choices=['always', 'never', 'policy'], default='policy'), + recreate=dict(type='str', default='auto', choices=['always', 'never', 'auto']), + remove_images=dict(type='str', choices=['all', 'local']), + remove_volumes=dict(type='bool', default=False), + remove_orphans=dict(type='bool', default=False), + timeout=dict(type='int'), + services=dict(type='list', elements='str'), + scale=dict(type='dict'), + wait=dict(type='bool', default=False), + wait_timeout=dict(type='int'), + ) + argument_spec.update(common_compose_argspec()) + + client = AnsibleModuleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + result = ServicesManager(client).run() + client.module.exit_json(**result) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/docker/plugins/modules/docker_compose_v2_pull.py b/ansible_collections/community/docker/plugins/modules/docker_compose_v2_pull.py new file mode 100644 index 000000000..2b1980bf6 --- /dev/null +++ b/ansible_collections/community/docker/plugins/modules/docker_compose_v2_pull.py @@ -0,0 +1,163 @@ +#!/usr/bin/python +# +# Copyright (c) 2023, Felix Fontein <felix@fontein.de> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' + +module: docker_compose_v2_pull + +short_description: Pull a Docker compose project + +version_added: 3.6.0 + +description: + - Uses Docker Compose to pull images for a project. 
+ +extends_documentation_fragment: + - community.docker.compose_v2 + - community.docker.compose_v2.minimum_version + - community.docker.docker.cli_documentation + - community.docker.attributes + - community.docker.attributes.actiongroup_docker + +attributes: + check_mode: + support: full + details: + - If O(policy=always), the module will always indicate a change. + Docker Compose does not give any information whether pulling would + update the image or not. + diff_mode: + support: none + +options: + policy: + description: + - Whether to pull images before running. This is used when C(docker compose up) is run. + - V(always) ensures that the images are always pulled, even when already present on the Docker daemon. + - V(missing) only pulls them when they are not present on the Docker daemon. This is only supported since Docker Compose 2.22.0. + type: str + choices: + - always + - missing + default: always + +author: + - Felix Fontein (@felixfontein) + +seealso: + - module: community.docker.docker_compose_v2 +''' + +EXAMPLES = ''' +- name: Pull images for flask project + community.docker.docker_compose_v2_pull: + project_src: /path/to/flask +''' + +RETURN = ''' +actions: + description: + - A list of actions that have been applied. + returned: success + type: list + elements: dict + contains: + what: + description: + - What kind of resource was changed. + type: str + sample: container + choices: + - image + - unknown + id: + description: + - The ID of the resource that was changed. + type: str + sample: container + status: + description: + - The status change that happened.
+ type: str + sample: Pulling + choices: + - Pulling +''' + +import traceback + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.docker.plugins.module_utils.common_cli import ( + AnsibleModuleDockerClient, + DockerException, +) + +from ansible_collections.community.docker.plugins.module_utils.compose_v2 import ( + BaseComposeManager, + common_compose_argspec, +) + +from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion + + +class PullManager(BaseComposeManager): + def __init__(self, client): + super(PullManager, self).__init__(client) + parameters = self.client.module.params + + self.policy = parameters['policy'] + + if self.policy != 'always' and self.compose_version < LooseVersion('2.22.0'): + # https://github.com/docker/compose/pull/10981 - 2.22.0 + self.client.fail('A pull policy other than always is only supported since Docker Compose 2.22.0. {0} has version {1}'.format( + self.client.get_cli(), self.compose_version)) + + def get_pull_cmd(self, dry_run, no_start=False): + args = self.get_base_args() + ['pull'] + if self.policy != 'always': + args.extend(['--policy', self.policy]) + if dry_run: + args.append('--dry-run') + args.append('--') + return args + + def run(self): + result = dict() + args = self.get_pull_cmd(self.check_mode) + rc, stdout, stderr = self.client.call_cli(*args, cwd=self.project_src) + events = self.parse_events(stderr, dry_run=self.check_mode) + self.emit_warnings(events) + self.update_result(result, events, stdout, stderr, ignore_service_pull_events=self.policy != 'missing' and not self.check_mode) + self.update_failed(result, events, args, stdout, stderr, rc) + self.cleanup_result(result) + return result + + +def main(): + argument_spec = dict( + policy=dict(type='str', choices=['always', 'missing'], default='always'), + ) + argument_spec.update(common_compose_argspec()) + + client = AnsibleModuleDockerClient( + argument_spec=argument_spec, + 
supports_check_mode=True, + ) + + try: + result = PullManager(client).run() + client.module.exit_json(**result) + except DockerException as e: + client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/docker/plugins/modules/docker_config.py b/ansible_collections/community/docker/plugins/modules/docker_config.py index 9f55e0f0d..86654e78b 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_config.py +++ b/ansible_collections/community/docker/plugins/modules/docker_config.py @@ -18,7 +18,7 @@ description: - Create and remove Docker configs in a Swarm environment. Similar to C(docker config create) and C(docker config rm). - Adds to the metadata of new configs 'ansible_key', an encrypted hash representation of the data, which is then used in future runs to test if a config has changed. If 'ansible_key' is not present, then a config will not be updated - unless the I(force) option is set. + unless the O(force) option is set. - Updates to configs are performed by removing the config and creating it again. extends_documentation_fragment: @@ -37,45 +37,45 @@ options: data: description: - The value of the config. - - Mutually exclusive with I(data_src). One of I(data) and I(data_src) is required if I(state=present). + - Mutually exclusive with O(data_src). One of O(data) and O(data_src) is required if O(state=present). type: str data_is_b64: description: - - If set to C(true), the data is assumed to be Base64 encoded and will be + - If set to V(true), the data is assumed to be Base64 encoded and will be decoded before being used. - - To use binary I(data), it is better to keep it Base64 encoded and let it + - To use binary O(data), it is better to keep it Base64 encoded and let it be decoded by this option. type: bool default: false data_src: description: - The file on the target from which to read the config. 
- - Mutually exclusive with I(data). One of I(data) and I(data_src) is required if I(state=present). + - Mutually exclusive with O(data). One of O(data) and O(data_src) is required if O(state=present). type: path version_added: 1.10.0 labels: description: - - "A map of key:value meta data, where both the I(key) and I(value) are expected to be a string." + - "A map of key:value meta data, where both the C(key) and C(value) are expected to be a string." - If new meta data is provided, or existing meta data is modified, the config will be updated by removing it and creating it again. type: dict force: description: - - Use with state C(present) to always remove and recreate an existing config. - - If C(true), an existing config will be replaced, even if it has not been changed. + - Use with O(state=present) to always remove and recreate an existing config. + - If V(true), an existing config will be replaced, even if it has not been changed. type: bool default: false rolling_versions: description: - - If set to C(true), configs are created with an increasing version number appended to their name. + - If set to V(true), configs are created with an increasing version number appended to their name. - Adds a label containing the version number to the managed configs with the name C(ansible_version). type: bool default: false version_added: 2.2.0 versions_to_keep: description: - - When using I(rolling_versions), the number of old versions of the config to keep. + - When using O(rolling_versions), the number of old versions of the config to keep. - Extraneous old configs are deleted after the new one is created. - - Set to C(-1) to keep everything or to C(0) or C(1) to keep only the current one. + - Set to V(-1) to keep everything or V(0) or V(1) to keep only the current one. type: int default: 5 version_added: 2.2.0 @@ -86,7 +86,7 @@ options: required: true state: description: - - Set to C(present), if the config should exist, and C(absent), if it should not. 
+ - Set to V(present), if the config should exist, and V(absent), if it should not. type: str default: present choices: @@ -94,7 +94,7 @@ options: - present template_driver: description: - - Set to C(golang) to use a Go template in I(data) or a Go template file in I(data_src). + - Set to V(golang) to use a Go template in O(data) or a Go template file in O(data_src). type: str choices: - golang @@ -183,13 +183,13 @@ RETURN = ''' config_id: description: - The ID assigned by Docker to the config object. - returned: success and I(state) is C(present) + returned: success and O(state=present) type: str sample: 'hzehrmyjigmcp2gb6nlhmjqcv' config_name: description: - The name of the created config object. - returned: success and I(state) is C(present) + returned: success and O(state=present) type: str sample: 'awesome_config' version_added: 2.2.0 diff --git a/ansible_collections/community/docker/plugins/modules/docker_container.py b/ansible_collections/community/docker/plugins/modules/docker_container.py index 9d1ed416e..d7dbc3780 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_container.py +++ b/ansible_collections/community/docker/plugins/modules/docker_container.py @@ -20,11 +20,11 @@ description: notes: - For most config changes, the container needs to be recreated. This means that the existing container has to be destroyed and - a new one created. This can cause unexpected data loss and downtime. You can use the I(comparisons) option to + a new one created. This can cause unexpected data loss and downtime. You can use the O(comparisons) option to prevent this. - If the module needs to recreate the container, it will only use the options provided to the module to create the - new container (except I(image)). Therefore, always specify B(all) options relevant to the container. - - When I(restart) is set to C(true), the module will only restart the container if no config changes are detected. + new container (except O(image)). 
Therefore, always specify B(all) options relevant to the container. + - When O(restart) is set to V(true), the module will only restart the container if no config changes are detected. extends_documentation_fragment: - community.docker.docker.api_documentation @@ -35,7 +35,8 @@ attributes: check_mode: support: partial details: - - When trying to pull an image, the module assumes this is always changed in check mode. + - When trying to pull an image, the module assumes this is never changed in check mode except when the image is not present on the Docker daemon. + - This behavior can be configured with O(pull_check_mode_behavior). diff_mode: support: full @@ -43,7 +44,7 @@ options: auto_remove: description: - Enable auto-removal of the container on daemon side when the container's process exits. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + - If O(container_default_behavior=compatibility), this option has a default of V(false). type: bool blkio_weight: description: @@ -76,14 +77,14 @@ options: version_added: 1.1.0 cleanup: description: - - Use with I(detach=false) to remove the container after successful execution. + - Use with O(detach=false) to remove the container after successful execution. type: bool default: false command: description: - Command to execute when the container starts. A command may be either a string or a list. - Prior to version 2.4, strings were split on commas. - - See I(command_handling) for differences in how strings and lists are handled. + - See O(command_handling) for differences in how strings and lists are handled. type: raw comparisons: description: @@ -91,30 +92,30 @@ options: module options to decide whether the container should be recreated / updated or not. - Only options which correspond to the state of a container as handled by the - Docker daemon can be specified, as well as I(networks). 
- - Must be a dictionary specifying for an option one of the keys C(strict), C(ignore) - and C(allow_more_present). - - If C(strict) is specified, values are tested for equality, and changes always - result in updating or restarting. If C(ignore) is specified, changes are ignored. - - C(allow_more_present) is allowed only for lists, sets and dicts. If it is + Docker daemon can be specified, as well as O(networks). + - Must be a dictionary specifying for an option one of the keys V(strict), V(ignore) + and V(allow_more_present). + - If V(strict) is specified, values are tested for equality, and changes always + result in updating or restarting. If V(ignore) is specified, changes are ignored. + - V(allow_more_present) is allowed only for lists, sets and dicts. If it is specified for lists or sets, the container will only be updated or restarted if the module option contains a value which is not present in the container's options. If the option is specified for a dict, the container will only be updated or restarted if the module option contains a key which is not present in the container's option, or if the value of a key present differs. - - The wildcard option C(*) can be used to set one of the default values C(strict) - or C(ignore) to I(all) comparisons which are not explicitly set to other values. + - The wildcard option C(*) can be used to set one of the default values V(strict) + or V(ignore) to I(all) comparisons which are not explicitly set to other values. - See the examples for details. type: dict container_default_behavior: description: - In older versions of this module, various module options used to have default values. This caused problems with containers which use different values for these options. - - The default value is now C(no_defaults). To restore the old behavior, set it to - C(compatibility), which will ensure that the default values are used when the values + - The default value is now V(no_defaults). 
To restore the old behavior, set it to + V(compatibility), which will ensure that the default values are used when the values are not explicitly specified by the user. - - This affects the I(auto_remove), I(detach), I(init), I(interactive), I(memory), - I(paused), I(privileged), I(read_only) and I(tty) options. + - This affects the O(auto_remove), O(detach), O(init), O(interactive), O(memory), + O(paused), O(privileged), O(read_only), and O(tty) options. type: str choices: - compatibility @@ -122,15 +123,15 @@ options: default: no_defaults command_handling: description: - - The default behavior for I(command) (when provided as a list) and I(entrypoint) is to + - The default behavior for O(command) (when provided as a list) and O(entrypoint) is to convert them to strings without considering shell quoting rules. (For comparing idempotency, the resulting string is split considering shell quoting rules.) - - Also, setting I(command) to an empty list of string, and setting I(entrypoint) to an empty + - Also, setting O(command) to an empty list of string, and setting O(entrypoint) to an empty list will be handled as if these options are not specified. This is different from idempotency handling for other container-config related options. - - When this is set to C(compatibility), which was the default until community.docker 3.0.0, the + - When this is set to V(compatibility), which was the default until community.docker 3.0.0, the current behavior will be kept. - - When this is set to C(correct), these options are kept as lists, and an empty value or empty + - When this is set to V(correct), these options are kept as lists, and an empty value or empty list will be handled correctly for idempotency checks. This has been the default since community.docker 3.0.0. type: str @@ -142,25 +143,26 @@ options: cpu_period: description: - Limit CPU CFS (Completely Fair Scheduler) period. - - See I(cpus) for an easier to use alternative. 
+ - See O(cpus) for an easier to use alternative. type: int cpu_quota: description: - Limit CPU CFS (Completely Fair Scheduler) quota. - - See I(cpus) for an easier to use alternative. + - See O(cpus) for an easier to use alternative. type: int cpus: description: - Specify how much of the available CPU resources a container can use. - - A value of C(1.5) means that at most one and a half CPU (core) will be used. + - A value of V(1.5) means that at most one and a half CPU (core) will be used. type: float cpuset_cpus: description: - - CPUs in which to allow execution C(1,3) or C(1-3). + - CPUs in which to allow execution. + - For example V(1,3) or V(1-3). type: str cpuset_mems: description: - - Memory nodes (MEMs) in which to allow execution C(0-3) or C(0,1). + - Memory nodes (MEMs) in which to allow execution V(0-3) or V(0,1). type: str cpu_shares: description: @@ -170,19 +172,19 @@ options: description: - Define the default host IP to use. - Must be an empty string, an IPv4 address, or an IPv6 address. - - With Docker 20.10.2 or newer, this should be set to an empty string (C("")) to avoid the + - With Docker 20.10.2 or newer, this should be set to an empty string (V("")) to avoid the port bindings without an explicit IP address to only bind to IPv4. See U(https://github.com/ansible-collections/community.docker/issues/70) for details. - By default, the module will try to auto-detect this value from the C(bridge) network's C(com.docker.network.bridge.host_binding_ipv4) option. If it cannot auto-detect it, it - will fall back to C(0.0.0.0). + will fall back to V(0.0.0.0). type: str version_added: 1.2.0 detach: description: - Enable detached mode to leave the container running in background. - If disabled, the task will reflect the status of the container run (failed if the command failed). - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(true). 
+ - If O(container_default_behavior=compatibility), this option has a default of V(true). type: bool devices: description: @@ -204,8 +206,8 @@ options: rate: description: - "Device read limit in format C(<number>[<unit>])." - - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." + - "Number is a positive integer. Unit can be one of V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." - "Omitting the unit defaults to bytes." type: str required: true @@ -223,8 +225,8 @@ options: rate: description: - "Device read limit in format C(<number>[<unit>])." - - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." + - "Number is a positive integer. Unit can be one of V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." - "Omitting the unit defaults to bytes." type: str required: true @@ -281,7 +283,7 @@ options: count: description: - Number or devices to request. - - Set to C(-1) to request all available devices. + - Set to V(-1) to request all available devices. type: int device_ids: description: @@ -319,19 +321,19 @@ options: env: description: - Dictionary of key,value pairs. - - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example C("true")) in order to avoid data loss. - - Please note that if you are passing values in with Jinja2 templates, like C("{{ value }}"), you need to add C(| string) to prevent Ansible to - convert strings such as C("true") back to booleans. The correct way is to use C("{{ value | string }}"). + - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example V("true")) in order to avoid data loss. 
+ - Please note that if you are passing values in with Jinja2 templates, like V("{{ value }}"), you need to add V(| string) to prevent Ansible from + converting strings such as V("true") back to booleans. The correct way is to use V("{{ value | string }}"). type: dict env_file: description: - - Path to a file, present on the target, containing environment variables I(FOO=BAR). - - If variable also present in I(env), then the I(env) value will override. + - Path to a file, present on the target, containing environment variables C(FOO=BAR). + - If a variable is also present in O(env), then the O(env) value will override. type: path entrypoint: description: - Command that overwrites the default C(ENTRYPOINT) of the image. - - See I(command_handling) for differences in how strings and lists are handled. + - See O(command_handling) for differences in how strings and lists are handled. type: list elements: str etc_hosts: @@ -367,34 +369,35 @@ options: - Configure a check that is run to determine whether or not containers for this service are "healthy". - "See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck) for details on how healthchecks work." - - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format - that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - "O(healthcheck.interval), O(healthcheck.timeout) and O(healthcheck.start_period) are specified as durations. + They accept duration as a string in a format that looks like: V(5h34m56s), V(1m30s), and so on. + The supported units are V(us), V(ms), V(s), V(m) and V(h)." type: dict suboptions: test: description: - Command to run to check health. - - Must be either a string or a list. 
If it is a list, the first item must be one of V(NONE), V(CMD) or V(CMD-SHELL). type: raw interval: description: - Time between running the check. - - The default used by the Docker daemon is C(30s). + - The default used by the Docker daemon is V(30s). type: str timeout: description: - Maximum time to allow one check to run. - - The default used by the Docker daemon is C(30s). + - The default used by the Docker daemon is V(30s). type: str retries: description: - Consecutive number of failures needed to report unhealthy. - - The default used by the Docker daemon is C(3). + - The default used by the Docker daemon is V(3). type: int start_period: description: - Start period for the container to initialize before starting health-retries countdown. - - The default used by the Docker daemon is C(0s). + - The default used by the Docker daemon is V(0s). type: str hostname: description: @@ -402,30 +405,30 @@ options: type: str ignore_image: description: - - When I(state) is C(present) or C(started), the module compares the configuration of an existing + - When O(state) is V(present) or V(started), the module compares the configuration of an existing container to requested configuration. The evaluation includes the image version. If the image version in the registry does not match the container, the container will be recreated. You can - stop this behavior by setting I(ignore_image) to C(true). + stop this behavior by setting O(ignore_image) to V(true). - "B(Warning:) This option is ignored if C(image: ignore) or C(*: ignore) is specified in the - I(comparisons) option." + O(comparisons) option." - "This option is deprecated since community.docker 3.2.0 and will be removed in community.docker 4.0.0. - Use C(image: ignore) in I(comparisons) instead of I(ignore_image=true)." + Use C(image: ignore) in O(comparisons) instead of O(ignore_image=true)." type: bool default: false image: description: - Repository path and tag used to create the container. 
If an image is not found or pull is true, the image - will be pulled from the registry. If no tag is included, C(latest) will be used. + will be pulled from the registry. If no tag is included, V(latest) will be used. - Can also be an image ID. If this is the case, the image is assumed to be available locally. - The I(pull) option is ignored for this case. + The O(pull) option is ignored for this case. type: str image_comparison: description: - Determines which image to use for idempotency checks that depend on image parameters. - - The default, C(desired-image), will use the image that is provided to the module via the I(image) parameter. - - C(current-image) will use the image that the container is currently using, if the container exists. It + - The default, V(desired-image), will use the image that is provided to the module via the O(image) parameter. + - V(current-image) will use the image that the container is currently using, if the container exists. It falls back to the image that is provided in case the container does not yet exist. - - This affects the I(env), I(env_file), I(exposed_ports), I(labels), and I(volumes) options. + - This affects the O(env), O(env_file), O(exposed_ports), O(labels), and O(volumes) options. type: str choices: - desired-image @@ -435,13 +438,13 @@ options: image_label_mismatch: description: - How to handle labels inherited from the image that are not set explicitly. - - When C(ignore), labels that are present in the image but not specified in I(labels) will be - ignored. This is useful to avoid having to specify the image labels in I(labels) while keeping - labels I(comparisons) C(strict). - - When C(fail), if there are labels present in the image which are not set from I(labels), the + - When V(ignore), labels that are present in the image but not specified in O(labels) will be + ignored. This is useful to avoid having to specify the image labels in O(labels) while keeping + labels O(comparisons) V(strict). 
+ - When V(fail), if there are labels present in the image which are not set from O(labels), the module will fail. This prevents introducing unexpected labels from the base image. - "B(Warning:) This option is ignored unless C(labels: strict) or C(*: strict) is specified in - the I(comparisons) option." + the O(comparisons) option." type: str choices: - 'ignore' @@ -452,30 +455,31 @@ options: description: - Determines what the module does if the image matches, but the image name in the container's configuration does not match the image name provided to the module. - - "This is ignored if C(image: ignore) is set in I(comparisons)." - - If set to C(recreate) the container will be recreated. - - If set to C(ignore) the container will not be recreated because of this. It might still get recreated for other reasons. + - "This is ignored if C(image: ignore) is set in O(comparisons)." + - If set to V(recreate) the container will be recreated. + - If set to V(ignore) (currently the default) the container will not be recreated because of this. It might still get recreated for other reasons. This has been the default behavior of the module for a long time, but might not be what users expect. + - Since community.docker 3.5.0, the default V(ignore) has been deprecated. If not specified, a deprecation warning + will be emitted if this setting would make a difference. The default will change to V(recreate) in community.docker 4.0.0. type: str choices: - recreate - ignore - default: ignore version_added: 3.2.0 init: description: - Run an init inside the container that forwards signals and reaps processes. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + - If O(container_default_behavior=compatibility), this option has a default of V(false). type: bool interactive: description: - Keep stdin open after a container is launched, even if not attached. 
- - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + - If O(container_default_behavior=compatibility), this option has a default of V(false). type: bool ipc_mode: description: - Set the IPC mode for the container. - - Can be one of C(container:<name|id>) to reuse another container's IPC namespace or C(host) to use + - Can be one of V(container:<name|id>) to reuse another container's IPC namespace or V(host) to use the host's IPC namespace within the container. type: str keep_volumes: @@ -490,8 +494,8 @@ options: kernel_memory: description: - "Kernel memory limit in format C(<number>[<unit>]). Number is a positive integer. - Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte). Minimum is C(4M)." + Unit can be V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte). Minimum is V(4M)." - Omitting the unit defaults to bytes. type: str labels: @@ -506,42 +510,45 @@ options: elements: str log_driver: description: - - Specify the logging driver. Docker uses C(json-file) by default. - - See L(here,https://docs.docker.com/config/containers/logging/configure/) for possible choices. + - Specify the logging driver. Docker uses V(json-file) by default. + - See L(the Docker logging configuration documentation,https://docs.docker.com/config/containers/logging/configure/) + for possible choices. type: str log_options: description: - - Dictionary of options specific to the chosen I(log_driver). + - Dictionary of options specific to the chosen O(log_driver). - See U(https://docs.docker.com/engine/admin/logging/overview/) for details. - - I(log_driver) needs to be specified for I(log_options) to take effect, even if using the default C(json-file) driver. + - O(log_driver) needs to be specified for O(log_options) to take effect, even if using the default V(json-file) driver. 
type: dict aliases: - log_opt mac_address: description: - - Container MAC address (for example, C(92:d0:c6:0a:29:33)). + - Container MAC address (for example, V(92:d0:c6:0a:29:33)). + - Note that the global container-wide MAC address is deprecated and no longer used since Docker API version 1.44. + - Use O(networks[].mac_address) instead. type: str memory: description: - "Memory limit in format C(<number>[<unit>]). Number is a positive integer. - Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." + Unit can be V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." - Omitting the unit defaults to bytes. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C("0"). + - If O(container_default_behavior=compatibility), this option has a default of V("0"). type: str memory_reservation: description: - "Memory soft limit in format C(<number>[<unit>]). Number is a positive integer. - Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." + Unit can be V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." - Omitting the unit defaults to bytes. type: str memory_swap: description: - "Total memory limit (memory + swap) in format C(<number>[<unit>]), or - the special values C(unlimited) or C(-1) for unlimited swap usage. - Number is a positive integer. Unit can be C(B) (byte), C(K) (kibibyte, 1024B), - C(M) (mebibyte), C(G) (gibibyte), C(T) (tebibyte), or C(P) (pebibyte)." + the special values V(unlimited) or V(-1) for unlimited swap usage. + Number is a positive integer. Unit can be V(B) (byte), V(K) (kibibyte, 1024B), + V(M) (mebibyte), V(G) (gibibyte), V(T) (tebibyte), or V(P) (pebibyte)." - Omitting the unit defaults to bytes. 
type: str memory_swappiness: @@ -554,7 +561,7 @@ options: type: list elements: dict description: - - Specification for mounts to be added to the container. More powerful alternative to I(volumes). + - Specification for mounts to be added to the container. More powerful alternative to O(volumes). suboptions: target: description: @@ -565,12 +572,12 @@ options: description: - Mount source. - For example, this can be a volume name or a host path. - - If not supplied when I(type=volume) an anonymous volume will be created. + - If not supplied when O(mounts[].type=volume) an anonymous volume will be created. type: str type: description: - The mount type. - - Note that C(npipe) is only supported by Docker for Windows. + - Note that V(npipe) is only supported by Docker for Windows. type: str choices: - bind @@ -593,7 +600,7 @@ options: - delegated propagation: description: - - Propagation mode. Only valid for the C(bind) type. + - Propagation mode. Only valid for the V(bind) type. type: str choices: - private @@ -604,16 +611,16 @@ options: - rslave no_copy: description: - - False if the volume should be populated with the data from the target. Only valid for the C(volume) type. - - The default value is C(false). + - False if the volume should be populated with the data from the target. Only valid for the V(volume) type. + - The default value is V(false). type: bool labels: description: - - User-defined name and labels for the volume. Only valid for the C(volume) type. + - User-defined name and labels for the volume. Only valid for the V(volume) type. type: dict volume_driver: description: - - Specify the volume driver. Only valid for the C(volume) type. + - Specify the volume driver. Only valid for the V(volume) type. - See L(here,https://docs.docker.com/storage/volumes/#use-a-volume-driver) for details. type: str volume_options: @@ -624,8 +631,8 @@ options: tmpfs_size: description: - "The size for the tmpfs mount in bytes in format <number>[<unit>]." 
- - "Number is a positive integer. Unit can be one of C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." + - "Number is a positive integer. Unit can be one of V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." - "Omitting the unit defaults to bytes." type: str tmpfs_mode: @@ -640,24 +647,24 @@ options: required: true network_mode: description: - - Connect the container to a network. Choices are C(bridge), C(host), C(none), C(container:<name|id>), C(<network_name>) or C(default). - - "Since community.docker 2.0.0, if I(networks_cli_compatible) is C(true) and I(networks) contains at least one network, - the default value for I(network_mode) is the name of the first network in the I(networks) list. You can prevent this - by explicitly specifying a value for I(network_mode), like the default value C(default) which will be used by Docker if - I(network_mode) is not specified." + - Connect the container to a network. Choices are V(bridge), V(host), V(none), C(container:<name|id>), C(<network_name>) or V(default). + - "Since community.docker 2.0.0, if O(networks_cli_compatible=true) and O(networks) contains at least one network, + the default value for O(network_mode) is the name of the first network in the O(networks) list. You can prevent this + by explicitly specifying a value for O(network_mode), like the default value V(default) which will be used by Docker if + O(network_mode) is not specified." type: str userns_mode: description: - - Set the user namespace mode for the container. Currently, the only valid value are C(host) and the empty string. + - Set the user namespace mode for the container. Currently, the only valid value are V(host) and the empty string (V("")). type: str networks: description: - List of networks the container belongs to. - For examples of the data structure and usage see EXAMPLES below. 
- - "To remove a container from one or more networks, use C(networks: strict) in the I(comparisons) option." - - "If I(networks_cli_compatible) is set to C(false), this will not remove the default network if I(networks) is specified. - This is different from the behavior of C(docker run ...). You need to explicitly use C(networks: strict) in I(comparisons) - to enforce the removal of the default network (and all other networks not explicitly mentioned in I(networks)) in that case." + - "To remove a container from one or more networks, use C(networks: strict) in the O(comparisons) option." + - "If O(networks_cli_compatible=false), this will not remove the default network if O(networks) is specified. + This is different from the behavior of C(docker run ...). You need to explicitly use C(networks: strict) in O(comparisons) + to enforce the removal of the default network (and all other networks not explicitly mentioned in O(networks)) in that case." type: list elements: dict suboptions: @@ -685,17 +692,23 @@ options: can be used in the network to reach this container. type: list elements: str + mac_address: + description: + - Endpoint MAC address (for example, V(92:d0:c6:0a:29:33)). + - This is only available for Docker API version 1.44 and later. + type: str + version_added: 3.6.0 networks_cli_compatible: description: - - "If I(networks_cli_compatible) is set to C(true) (default), this module will behave as - C(docker run --network) and will B(not) add the default network if I(networks) is - specified. If I(networks) is not specified, the default network will be attached." - - "When I(networks_cli_compatible) is set to C(false) and networks are provided to the module - via the I(networks) option, the module behaves differently than C(docker run --network): + - "If O(networks_cli_compatible=true) (default), this module will behave as + C(docker run --network) and will B(not) add the default network if O(networks) is + specified. 
If O(networks) is not specified, the default network will be attached." + - "When O(networks_cli_compatible=false) and networks are provided to the module + via the O(networks) option, the module behaves differently than C(docker run --network): C(docker run --network other) will create a container with network C(other) attached, - but the default network not attached. This module with I(networks: {name: other}) will + but the default network not attached. This module with O(networks) set to C({name: other}) will create a container with both C(default) and C(other) attached. If C(networks: strict) - or C(*: strict) is set in I(comparisons), the C(default) network will be removed afterwards." + or C(*: strict) is set in O(comparisons), the C(default) network will be removed afterwards." type: bool default: true oom_killer: @@ -710,13 +723,13 @@ options: output_logs: description: - If set to true, output of the container command will be printed. - - Only effective when I(log_driver) is set to C(json-file), C(journald), or C(local). + - Only effective when O(log_driver) is set to V(json-file), V(journald), or V(local). type: bool default: false paused: description: - Use with the started state to pause running processes inside the container. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + - If O(container_default_behavior=compatibility), this option has a default of V(false). type: bool pid_mode: description: @@ -725,31 +738,34 @@ options: pids_limit: description: - Set PIDs limit for the container. It accepts an integer value. - - Set C(-1) for unlimited PIDs. + - Set V(-1) for unlimited PIDs. type: int platform: description: - Platform for the container in the format C(os[/arch[/variant]]). - - "Please note that inspecting the container does not always return the exact platform string used to - create the container. This can cause idempotency to break for this module. 
Use the I(comparisons) option - with C(platform: ignore) to prevent accidental recreation of the container due to this." + - "Note that since community.docker 3.5.0, the module uses both the image's metadata and the Docker + daemon's information to normalize platform strings similarly to how Docker itself is doing this. + If you notice idempotency problems, L(please create an issue in the community.docker GitHub repository, + https://github.com/ansible-collections/community.docker/issues/new?assignees=&labels=&projects=&template=bug_report.md). + For older community.docker versions, you can use the O(comparisons) option with C(platform: ignore) + to prevent accidental recreation of the container due to this." type: str version_added: 3.0.0 privileged: description: - Give extended privileges to the container. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + - If O(container_default_behavior=compatibility), this option has a default of V(false). type: bool publish_all_ports: description: - Publish all ports to the host. - - Any specified port bindings from I(published_ports) will remain intact when C(true). + - Any specified port bindings from O(published_ports) will remain intact when V(true). type: bool version_added: 1.8.0 published_ports: description: - List of ports to publish from the container to the host. - - "Use docker CLI syntax: C(8000), C(9000:8000), or C(0.0.0.0:9000:8000), where 8000 is a + - "Use docker CLI syntax: V(8000), V(9000:8000), or V(0.0.0.0:9000:8000), where 8000 is a container port, 9000 is a host port, and 0.0.0.0 is a host interface." - Port ranges can be used for source and destination ports. If two ranges with different lengths are specified, the shorter range will be used. @@ -757,41 +773,66 @@ options: to the first port of the destination range, but to a free port in that range. This is the same behavior as for C(docker) command line utility. 
- "Bind addresses must be either IPv4 or IPv6 addresses. Hostnames are B(not) allowed. This - is different from the C(docker) command line utility. Use the R(dig lookup,ansible_collections.community.general.dig_lookup) + is different from the C(docker) command line utility. Use the P(community.general.dig#lookup) lookup to resolve hostnames." - - If I(networks) parameter is provided, will inspect each network to see if there exists + - If O(networks) parameter is provided, will inspect each network to see if there exists a bridge network with optional parameter C(com.docker.network.bridge.host_binding_ipv4). If such a network is found, then published ports where no host IP address is specified will be bound to the host IP pointed to by C(com.docker.network.bridge.host_binding_ipv4). Note that the first bridge network with a C(com.docker.network.bridge.host_binding_ipv4) - value encountered in the list of I(networks) is the one that will be used. - - The value C(all) was allowed in earlier versions of this module. Support for it was removed in - community.docker 3.0.0. Use the I(publish_all_ports) option instead. + value encountered in the list of O(networks) is the one that will be used. + - The value V(all) was allowed in earlier versions of this module. Support for it was removed in + community.docker 3.0.0. Use the O(publish_all_ports) option instead. type: list elements: str aliases: - ports pull: description: - - If true, always pull the latest version of an image. Otherwise, will only pull an image - when missing. + - If set to V(never), will never try to pull an image. Will fail if the image is not available + on the Docker daemon. + - If set to V(missing) or V(false), only pull the image if it is not available on the Docker + daemon. This is the default behavior. + - If set to V(always) or V(true), always try to pull the latest version of the image. - "B(Note:) images are only pulled when specified by name. 
If the image is specified - as a image ID (hash), it cannot be pulled." - type: bool - default: false + as an image ID (hash), it cannot be pulled, and this option is ignored." + - "B(Note:) the values V(never), V(missing), and V(always) are only available since + community.docker 3.8.0. Earlier versions only support V(true) and V(false)." + type: raw + choices: + - never + - missing + - always + - true + - false + default: missing + pull_check_mode_behavior: + description: + - Allows adjusting the behavior when O(pull=always) or O(pull=true) in check mode. + - Since the Docker daemon does not expose any functionality to test whether a pull will result + in a changed image, the module by default acts like O(pull=always) only results in a change when + the image is not present. + - If set to V(image_not_present) (default), only report changes in check mode when the image is not present. + - If set to V(always), always report changes in check mode. + type: str + default: image_not_present + choices: + - image_not_present + - always + version_added: 3.8.0 purge_networks: description: - - Remove the container from ALL networks not included in I(networks) parameter. - - Any default networks such as C(bridge), if not found in I(networks), will be removed as well. + - Remove the container from ALL networks not included in O(networks) parameter. + - Any default networks such as C(bridge), if not found in O(networks), will be removed as well. - "This option is deprecated since community.docker 3.2.0 and will be removed in community.docker 4.0.0. - Use C(networks: strict) in I(comparisons) instead of I(purge_networks=true) and make sure that - I(networks) is specified. If you want to remove all networks, specify I(networks: [])." + Use C(networks: strict) in O(comparisons) instead of O(purge_networks=true) and make sure that + O(networks) is specified. If you want to remove all networks, specify O(networks) as C([])." 
type: bool default: false read_only: description: - Mount the container's root file system as read-only. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + - If O(container_default_behavior=compatibility), this option has a default of V(false). type: bool recreate: description: @@ -816,7 +857,7 @@ options: restart_policy: description: - Container restart policy. - - Place quotes around C(no) option. + - Place quotes around V(no) option. type: str choices: - 'no' @@ -834,9 +875,9 @@ options: shm_size: description: - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer. - Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." - - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses C(64M). + Unit can be V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." + - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses V(64M). type: str security_opts: description: @@ -845,22 +886,22 @@ options: elements: str state: description: - - 'C(absent) - A container matching the specified name will be stopped and removed. Use I(force_kill) to kill the container - rather than stopping it. Use I(keep_volumes) to retain anonymous volumes associated with the removed container.' - - 'C(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no + - 'V(absent) - A container matching the specified name will be stopped and removed. Use O(force_kill) to kill the container + rather than stopping it. Use O(keep_volumes) to retain anonymous volumes associated with the removed container.' + - 'V(present) - Asserts the existence of a container matching the name and any provided configuration parameters. If no container matches the name, a container will be created. 
If a container matches the name but the provided configuration does not match, the container will be updated, if it can be. If it cannot be updated, it will be removed and re-created with the requested config.' - - 'C(started) - Asserts that the container is first C(present), and then if the container is not running moves it to a running - state. Use I(restart) to force a matching container to be stopped and restarted.' - - 'C(stopped) - Asserts that the container is first C(present), and then if the container is running moves it to a stopped + - 'V(started) - Asserts that the container is first V(present), and then if the container is not running moves it to a running + state. Use O(restart) to force a matching container to be stopped and restarted.' + - 'V(stopped) - Asserts that the container is first V(present), and then if the container is running moves it to a stopped state.' - - "To control what will be taken into account when comparing configuration, see the I(comparisons) option. To avoid that the - image version will be taken into account, you can also use the C(image: ignore) in the I(comparisons) option." - - Use the I(recreate) option to always force re-creation of a matching container, even if it is running. - - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because I(state) is - C(stopped), please use the I(force_kill) option. Use I(keep_volumes) to retain anonymous volumes associated with a removed container. - - Use I(keep_volumes) to retain anonymous volumes associated with a removed container. + - "To control what will be taken into account when comparing configuration, see the O(comparisons) option. To avoid that the + image version will be taken into account, you can also use the V(image: ignore) in the O(comparisons) option." + - Use the O(recreate) option to always force re-creation of a matching container, even if it is running. 
+ - If the container should be killed instead of stopped in case it needs to be stopped for recreation, or because O(state) is + V(stopped), please use the O(force_kill) option. Use O(keep_volumes) to retain anonymous volumes associated with a removed container. + - Use O(keep_volumes) to retain anonymous volumes associated with a removed container. type: str default: started choices: @@ -896,11 +937,11 @@ options: tty: description: - Allocate a pseudo-TTY. - - If I(container_default_behavior) is set to C(compatibility), this option has a default of C(false). + - If O(container_default_behavior=compatibility), this option has a default of V(false). type: bool ulimits: description: - - "List of ulimit options. A ulimit is specified as C(nofile:262144:262144)." + - "List of ulimit options. A ulimit is specified as V(nofile:262144:262144)." type: list elements: str sysctls: @@ -920,12 +961,12 @@ options: description: - List of volumes to mount within the container. - "Use docker CLI-style syntax: C(/host:/container[:mode])" - - "Mount modes can be a comma-separated list of various modes such as C(ro), C(rw), C(consistent), - C(delegated), C(cached), C(rprivate), C(private), C(rshared), C(shared), C(rslave), C(slave), and - C(nocopy). Note that the docker daemon might not support all modes and combinations of such modes." - - SELinux hosts can additionally use C(z) or C(Z) to use a shared or private label for the volume. - - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of C(ro), C(rw), - C(z), and C(Z)." + - "Mount modes can be a comma-separated list of various modes such as V(ro), V(rw), V(consistent), + V(delegated), V(cached), V(rprivate), V(private), V(rshared), V(shared), V(rslave), V(slave), and + V(nocopy). Note that the docker daemon might not support all modes and combinations of such modes." + - SELinux hosts can additionally use V(z) or V(Z) to use a shared or private label for the volume. 
+ - "Note that Ansible 2.7 and earlier only supported one mode, which had to be one of V(ro), V(rw), + V(z), and V(Z)." type: list elements: str volume_driver: @@ -1225,9 +1266,9 @@ RETURN = ''' container: description: - Facts representing the current state of the container. Matches the docker inspection output. - - Empty if I(state) is C(absent). - - If I(detach=false), will include C(Output) attribute containing any output from container run. - returned: success; or when I(state=started) and I(detach=false), and when waiting for the container result did not fail + - Empty if O(state=absent). + - If O(detach=false), will include C(Output) attribute containing any output from container run. + returned: success; or when O(state=started) and O(detach=false), and when waiting for the container result did not fail type: dict sample: '{ "AppArmorProfile": "", @@ -1265,7 +1306,7 @@ status: description: - In case a container is started without detaching, this contains the exit code of the process in the container. - Before community.docker 1.1.0, this was only returned when non-zero. - returned: when I(state=started) and I(detach=false), and when waiting for the container result did not fail + returned: when O(state=started) and O(detach=false), and when waiting for the container result did not fail type: int sample: 0 ''' diff --git a/ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py b/ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py index f140bfe6a..2af99152d 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py +++ b/ansible_collections/community/docker/plugins/modules/docker_container_copy_into.py @@ -19,7 +19,7 @@ version_added: 3.4.0 description: - Copy a file into a Docker container. - Similar to C(docker cp). - - To copy files in a non-running container, you must provide the I(owner_id) and I(group_id) options. 
+ - To copy files in a non-running container, you must provide the O(owner_id) and O(group_id) options. This is also necessary if the container does not contain a C(/bin/sh) shell with an C(id) tool. attributes: @@ -41,19 +41,19 @@ options: path: description: - Path to a file on the managed node. - - Mutually exclusive with I(content). One of I(content) and I(path) is required. + - Mutually exclusive with O(content). One of O(content) and O(path) is required. type: path content: description: - The file's content. - - If you plan to provide binary data, provide it pre-encoded to base64, and set I(content_is_b64=true). - - Mutually exclusive with I(path). One of I(content) and I(path) is required. + - If you plan to provide binary data, provide it pre-encoded to base64, and set O(content_is_b64=true). + - Mutually exclusive with O(path). One of O(content) and O(path) is required. type: str content_is_b64: description: - - If set to C(true), the content in I(content) is assumed to be Base64 encoded and + - If set to V(true), the content in O(content) is assumed to be Base64 encoded and will be decoded before being used. - - To use binary I(content), it is better to keep it Base64 encoded and let it + - To use binary O(content), it is better to keep it Base64 encoded and let it be decoded by this option. Otherwise you risk the data to be interpreted as UTF-8 and corrupted. type: bool @@ -77,7 +77,7 @@ options: owner_id: description: - The owner ID to use when writing the file to disk. - - If provided, I(group_id) must also be provided. + - If provided, O(group_id) must also be provided. - If not provided, the module will try to determine the user and group ID for the current user in the container. This will only work if C(/bin/sh) is present in the container and the C(id) binary or shell builtin is available. Also the container must be running. @@ -85,7 +85,7 @@ options: group_id: description: - The group ID to use when writing the file to disk. 
- - If provided, I(owner_id) must also be provided. + - If provided, O(owner_id) must also be provided. - If not provided, the module will try to determine the user and group ID for the current user in the container. This will only work if C(/bin/sh) is present in the container and the C(id) binary or shell builtin is available. Also the container must be running. @@ -97,8 +97,8 @@ options: type: int force: description: - - If set to C(true), force writing the file (without performing any idempotency checks). - - If set to C(false), only write the file if it does not exist on the target. If a filesystem object exists at + - If set to V(true), force writing the file (without performing any idempotency checks). + - If set to V(false), only write the file if it does not exist on the target. If a filesystem object exists at the destination, the module will not do any change. - If this option is not specified, the module will be idempotent. To verify idempotency, it will try to get information on the filesystem object in the container, and if everything seems to match will download the file from the container @@ -138,7 +138,7 @@ RETURN = ''' container_path: description: - The actual path in the container. - - Can only be different from I(container_path) when I(follow=true). + - Can only be different from O(container_path) when O(follow=true). type: str returned: success ''' diff --git a/ansible_collections/community/docker/plugins/modules/docker_container_exec.py b/ansible_collections/community/docker/plugins/modules/docker_container_exec.py index 522a70a33..0d92dad96 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_container_exec.py +++ b/ansible_collections/community/docker/plugins/modules/docker_container_exec.py @@ -42,21 +42,21 @@ options: description: - The command to execute. - Since this is a list of arguments, no quoting is needed. - - Exactly one of I(argv) or I(command) must be specified. 
+ - Exactly one of O(argv) or O(command) must be specified. command: type: str description: - The command to execute. - - Exactly one of I(argv) or I(command) must be specified. + - Exactly one of O(argv) or O(command) must be specified. chdir: type: str description: - The directory to run the command in. detach: description: - - Whether to run the command synchronously (I(detach=false), default) or asynchronously (I(detach=true)). - - If set to C(true), I(stdin) cannot be provided, and the return values C(stdout), C(stderr) and - C(rc) are not returned. + - Whether to run the command synchronously (O(detach=false), default) or asynchronously (O(detach=true)). + - If set to V(true), O(stdin) cannot be provided, and the return values RV(stdout), RV(stderr), and + RV(rc) are not returned. type: bool default: false version_added: 2.1.0 @@ -68,12 +68,12 @@ options: type: str description: - Set the stdin of the command directly to the specified value. - - Can only be used if I(detach=false). + - Can only be used if O(detach=false). stdin_add_newline: type: bool default: true description: - - If set to C(true), appends a newline to I(stdin). + - If set to V(true), appends a newline to O(stdin). strip_empty_ends: type: bool default: true @@ -87,16 +87,17 @@ options: env: description: - Dictionary of environment variables with their respective values to be passed to the command ran inside the container. - - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example C("true")) in order to avoid data loss. - - Please note that if you are passing values in with Jinja2 templates, like C("{{ value }}"), you need to add C(| string) to prevent Ansible to - convert strings such as C("true") back to booleans. The correct way is to use C("{{ value | string }}"). + - Values which might be parsed as numbers, booleans or other types by the YAML parser must be quoted (for example V("true")) in order to avoid data loss. 
+   - Please note that if you are passing values in with Jinja2 templates, like V("{{ value }}"), you need to add V(| string) to prevent Ansible from
+     converting strings such as V("true") back to booleans. The correct way is to use V("{{ value | string }}").
     type: dict
     version_added: 2.1.0

 notes:
   - Does not support C(check_mode).
-  - Does B(not work with TCP TLS sockets) when using I(stdin). This is caused by the inability to send C(close_notify) without closing the connection
+  - Does B(not work with TCP TLS sockets) when using O(stdin). This is caused by the inability to send C(close_notify) without closing the connection
     with Python's C(SSLSocket)s. See U(https://github.com/ansible-collections/community.docker/issues/605) for more information.
+  - If you need to evaluate environment variables of the container in O(command) or O(argv), you need to pass the command through a shell,
+    like O(command=/bin/sh -c "echo $ENV_VARIABLE").

 author:
   - "Felix Fontein (@felixfontein)"
@@ -134,23 +135,23 @@ EXAMPLES = '''
 RETURN = '''
 stdout:
     type: str
-    returned: success and I(detach=false)
+    returned: success and O(detach=false)
     description:
       - The standard output of the container command.
 stderr:
     type: str
-    returned: success and I(detach=false)
+    returned: success and O(detach=false)
     description:
       - The standard error output of the container command.
 rc:
     type: int
-    returned: success and I(detach=false)
+    returned: success and O(detach=false)
     sample: 0
     description:
       - The exit code of the command.
 exec_id:
     type: str
-    returned: success and I(detach=true)
+    returned: success and O(detach=true)
     sample: 249d9e3075655baf705ed8f40488c5e9434049cf3431976f1bfdb73741c574c5
     description:
       - The execution ID of the command.
diff --git a/ansible_collections/community/docker/plugins/modules/docker_container_info.py b/ansible_collections/community/docker/plugins/modules/docker_container_info.py index bfc28156b..ff24b1bc0 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_container_info.py +++ b/ansible_collections/community/docker/plugins/modules/docker_container_info.py @@ -66,7 +66,7 @@ exists: container: description: - Facts representing the current state of the container. Matches the docker inspection output. - - Will be C(none) if container does not exist. + - Will be V(none) if container does not exist. returned: always type: dict sample: '{ diff --git a/ansible_collections/community/docker/plugins/modules/docker_host_info.py b/ansible_collections/community/docker/plugins/modules/docker_host_info.py index f08845faa..696cdfd0c 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_host_info.py +++ b/ansible_collections/community/docker/plugins/modules/docker_host_info.py @@ -108,10 +108,10 @@ options: default: false verbose_output: description: - - When set to C(true) and I(networks), I(volumes), I(images), I(containers) or I(disk_usage) is set to C(true) + - When set to V(true) and O(networks), O(volumes), O(images), O(containers), or O(disk_usage) is set to V(true) then output will contain verbose information about objects matching the full output of API method. For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/). - - The verbose output in this module contains only subset of information returned by I(_info) module + - The verbose output in this module contains only subset of information returned by this module for each type of the objects. type: bool default: false @@ -169,7 +169,7 @@ EXAMPLES = ''' RETURN = ''' can_talk_to_docker: description: - - Will be C(true) if the module can talk to the docker daemon. + - Will be V(true) if the module can talk to the docker daemon. 
returned: both on success and on error type: bool @@ -181,40 +181,40 @@ host_info: volumes: description: - List of dict objects containing the basic information about each volume. - Keys matches the C(docker volume ls) output unless I(verbose_output=true). - See description for I(verbose_output). - returned: When I(volumes) is C(true) + Keys matches the C(docker volume ls) output unless O(verbose_output=true). + See description for O(verbose_output). + returned: When O(volumes=true) type: list elements: dict networks: description: - List of dict objects containing the basic information about each network. - Keys matches the C(docker network ls) output unless I(verbose_output=true). - See description for I(verbose_output). - returned: When I(networks) is C(true) + Keys matches the C(docker network ls) output unless O(verbose_output=true). + See description for O(verbose_output). + returned: When O(networks=true) type: list elements: dict containers: description: - List of dict objects containing the basic information about each container. - Keys matches the C(docker container ls) output unless I(verbose_output=true). - See description for I(verbose_output). - returned: When I(containers) is C(true) + Keys matches the C(docker container ls) output unless O(verbose_output=true). + See description for O(verbose_output). + returned: When O(containers=true) type: list elements: dict images: description: - List of dict objects containing the basic information about each image. - Keys matches the C(docker image ls) output unless I(verbose_output=true). - See description for I(verbose_output). - returned: When I(images) is C(true) + Keys matches the C(docker image ls) output unless O(verbose_output=true). + See description for O(verbose_output). + returned: When O(images=true) type: list elements: dict disk_usage: description: - Information on summary disk usage by images, containers and volumes on docker host - unless I(verbose_output=true). 
See description for I(verbose_output). - returned: When I(disk_usage) is C(true) + unless O(verbose_output=true). See description for O(verbose_output). + returned: When O(disk_usage=true) type: dict ''' diff --git a/ansible_collections/community/docker/plugins/modules/docker_image.py b/ansible_collections/community/docker/plugins/modules/docker_image.py index 735de786a..b229ad382 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_image.py +++ b/ansible_collections/community/docker/plugins/modules/docker_image.py @@ -20,6 +20,7 @@ description: notes: - Building images is done using Docker daemon's API. It is not possible to use BuildKit / buildx this way. + Use M(community.docker.docker_image_build) to build images with BuildKit. extends_documentation_fragment: - community.docker.docker.api_documentation @@ -38,12 +39,12 @@ options: source: description: - "Determines where the module will try to retrieve the image from." - - "Use C(build) to build the image from a C(Dockerfile). I(build.path) must + - "Use V(build) to build the image from a C(Dockerfile). O(build.path) must be specified when this value is used." - - "Use C(load) to load the image from a C(.tar) file. I(load_path) must + - "Use V(load) to load the image from a C(.tar) file. O(load_path) must be specified when this value is used." - - "Use C(pull) to pull the image from a registry." - - "Use C(local) to make sure that the image is already available on the local + - "Use V(pull) to pull the image from a registry." + - "Use V(local) to make sure that the image is already available on the local docker daemon. This means that the module does not try to build, pull or load the image." type: str choices: @@ -63,8 +64,8 @@ options: elements: str dockerfile: description: - - Use with state C(present) and source C(build) to provide an alternate name for the Dockerfile to use when building an image. - - This can also include a relative path (relative to I(path)). 
+ - Use with O(state=present) and O(source=build) to provide an alternate name for the Dockerfile to use when building an image. + - This can also include a relative path (relative to O(build.path)). type: str http_timeout: description: @@ -112,13 +113,21 @@ options: suboptions: memory: description: - - Set memory limit for build. - type: int + - "Memory limit for build in format C(<number>[<unit>]). Number is a positive integer. + Unit can be V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." + - Omitting the unit defaults to bytes. + - Before community.docker 3.6.0, no units were allowed. + type: str memswap: description: - - Total memory (memory + swap). - - Use C(-1) to disable swap. - type: int + - "Total memory limit (memory + swap) for build in format C(<number>[<unit>]), or + the special values V(unlimited) or V(-1) for unlimited swap usage. + Number is a positive integer. Unit can be V(B) (byte), V(K) (kibibyte, 1024B), + V(M) (mebibyte), V(G) (gibibyte), V(T) (tebibyte), or V(P) (pebibyte)." + - Omitting the unit defaults to bytes. + - Before community.docker 3.6.0, no units were allowed, and neither was the special value V(unlimited). + type: str cpushares: description: - CPU shares (relative weight). @@ -126,11 +135,11 @@ options: cpusetcpus: description: - CPUs in which to allow execution. - - For example, C(0-3) or C(0,1). + - For example, V(0-3) or V(0,1). type: str use_config_proxy: description: - - If set to C(true) and a proxy configuration is specified in the docker client configuration + - If set to V(true) and a proxy configuration is specified in the docker client configuration (by default C($HOME/.docker/config.json)), the corresponding environment variables will be set in the container being built. type: bool @@ -144,37 +153,50 @@ options: - Platform in the format C(os[/arch[/variant]]). 
type: str version_added: 1.1.0 + shm_size: + description: + - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer. + Unit can be V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." + - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses V(64M). + type: str + version_added: 3.6.0 + labels: + description: + - Dictionary of key value pairs. + type: dict + version_added: 3.6.0 archive_path: description: - - Use with state C(present) to archive an image to a .tar file. + - Use with O(state=present) to archive an image to a C(.tar) file. type: path load_path: description: - - Use with state C(present) to load an image from a .tar file. - - Set I(source) to C(load) if you want to load the image. + - Use with O(state=present) to load an image from a C(.tar) file. + - Set O(source=load) if you want to load the image. type: path force_source: description: - - Use with state C(present) to build, load or pull an image (depending on the - value of the I(source) option) when the image already exists. + - Use with O(state=present) to build, load or pull an image (depending on the + value of the O(source) option) when the image already exists. type: bool default: false force_absent: description: - - Use with state I(absent) to un-tag and remove all images matching the specified name. + - Use with O(state=absent) to un-tag and remove all images matching the specified name. type: bool default: false force_tag: description: - - Use with state C(present) to force tagging an image. + - Use with O(state=present) to force tagging an image. type: bool default: false name: description: - "Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). When pushing or pulling an image the name can optionally include the tag by appending C(:tag_name)." 
- - Note that image IDs (hashes) are only supported for I(state=absent), for I(state=present) with I(source=load), - and for I(state=present) with I(source=local). + - Note that image IDs (hashes) are only supported for O(state=absent), for O(state=present) with O(source=load), + and for O(state=present) with O(source=local). type: str required: true pull: @@ -191,23 +213,23 @@ options: type: str push: description: - - Push the image to the registry. Specify the registry as part of the I(name) or I(repository) parameter. + - Push the image to the registry. Specify the registry as part of the O(name) or O(repository) parameter. type: bool default: false repository: description: - - Use with state C(present) to tag the image. - - Expects format C(repository:tag). If no tag is provided, will use the value of the I(tag) parameter or C(latest). - - If I(push=true), I(repository) must either include a registry, or will be assumed to belong to the default + - Use with O(state=present) to tag the image. + - Expects format C(repository:tag). If no tag is provided, will use the value of the O(tag) parameter or V(latest). + - If O(push=true), O(repository) must either include a registry, or will be assumed to belong to the default registry (Docker Hub). type: str state: description: - Make assertions about the state of an image. - - When C(absent) an image will be removed. Use the force option to un-tag and remove all images + - When V(absent) an image will be removed. Use the force option to un-tag and remove all images matching the provided name. - - When C(present) check if an image exists using the provided name and tag. If the image is not found or the - force option is used, the image will either be pulled, built or loaded, depending on the I(source) option. + - When V(present) check if an image exists using the provided name and tag. 
If the image is not found or the + force option is used, the image will either be pulled, built or loaded, depending on the O(source) option. type: str default: present choices: @@ -216,8 +238,8 @@ options: tag: description: - Used to select an image when pulling. Will be added to the image when pushing, tagging or building. Defaults to - I(latest). - - If I(name) parameter format is I(name:tag), then tag value from I(name) will take precedence. + V(latest). + - If O(name) parameter format is C(name:tag), then tag value from O(name) will take precedence. type: str default: latest @@ -229,6 +251,15 @@ author: - Chris Houseknecht (@chouseknecht) - Sorin Sbarnea (@ssbarnea) +seealso: + - module: community.docker.docker_image_build + - module: community.docker.docker_image_export + - module: community.docker.docker_image_info + - module: community.docker.docker_image_load + - module: community.docker.docker_image_pull + - module: community.docker.docker_image_push + - module: community.docker.docker_image_remove + - module: community.docker.docker_image_tag ''' EXAMPLES = ''' @@ -338,6 +369,7 @@ import os import traceback from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.text.formatters import human_to_bytes from ansible_collections.community.docker.plugins.module_utils.common_api import ( AnsibleDockerClient, @@ -377,6 +409,17 @@ from ansible_collections.community.docker.plugins.module_utils._api.utils.utils ) +def convert_to_bytes(value, module, name, unlimited_value=None): + if value is None: + return value + try: + if unlimited_value is not None and value in ('unlimited', str(unlimited_value)): + return unlimited_value + return human_to_bytes(value) + except ValueError as exc: + module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) + + class ImageManager(DockerBaseClass): def __init__(self, client, results): @@ -402,6 +445,12 @@ class ImageManager(DockerBaseClass): self.archive_path = 
parameters['archive_path'] self.cache_from = build.get('cache_from') self.container_limits = build.get('container_limits') + if self.container_limits and 'memory' in self.container_limits: + self.container_limits['memory'] = convert_to_bytes( + self.container_limits['memory'], self.client.module, 'build.container_limits.memory') + if self.container_limits and 'memswap' in self.container_limits: + self.container_limits['memswap'] = convert_to_bytes( + self.container_limits['memswap'], self.client.module, 'build.container_limits.memswap', unlimited_value=-1) self.dockerfile = build.get('dockerfile') self.force_source = parameters['force_source'] self.force_absent = parameters['force_absent'] @@ -424,6 +473,8 @@ class ImageManager(DockerBaseClass): self.buildargs = build.get('args') self.build_platform = build.get('platform') self.use_config_proxy = build.get('use_config_proxy') + self.shm_size = convert_to_bytes(build.get('shm_size'), self.client.module, 'build.shm_size') + self.labels = clean_dict_booleans_for_docker_api(build.get('labels')) # If name contains a tag, it takes precedence over tag parameter. 
if not is_image_name_id(self.name): @@ -825,6 +876,12 @@ class ImageManager(DockerBaseClass): if self.build_platform is not None: params['platform'] = self.build_platform + if self.shm_size is not None: + params['shmsize'] = self.shm_size + + if self.labels: + params['labels'] = json.dumps(self.labels) + if context is not None: headers['Content-Type'] = 'application/tar' @@ -945,8 +1002,8 @@ def main(): build=dict(type='dict', options=dict( cache_from=dict(type='list', elements='str'), container_limits=dict(type='dict', options=dict( - memory=dict(type='int'), - memswap=dict(type='int'), + memory=dict(type='str'), + memswap=dict(type='str'), cpushares=dict(type='int'), cpusetcpus=dict(type='str'), )), @@ -962,6 +1019,8 @@ def main(): target=dict(type='str'), etc_hosts=dict(type='dict'), platform=dict(type='str'), + shm_size=dict(type='str'), + labels=dict(type='dict'), )), archive_path=dict(type='path'), force_source=dict(type='bool', default=False), diff --git a/ansible_collections/community/docker/plugins/modules/docker_image_build.py b/ansible_collections/community/docker/plugins/modules/docker_image_build.py new file mode 100644 index 000000000..7f9502098 --- /dev/null +++ b/ansible_collections/community/docker/plugins/modules/docker_image_build.py @@ -0,0 +1,316 @@ +#!/usr/bin/python +# +# Copyright (c) 2023, Felix Fontein <felix@fontein.de> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_image_build + +short_description: Build Docker images using Docker buildx + +version_added: 3.6.0 + +description: + - This module allows you to build Docker images using Docker's buildx plugin (BuildKit). 
+
+extends_documentation_fragment:
+  - community.docker.docker.cli_documentation
+  - community.docker.attributes
+  - community.docker.attributes.actiongroup_docker
+
+attributes:
+  check_mode:
+    support: full
+  diff_mode:
+    support: none
+
+options:
+  name:
+    description:
+      - "Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name).
+        When pushing or pulling an image the name can optionally include the tag by appending C(:tag_name)."
+      - Note that image IDs (hashes) and names with digest cannot be used.
+    type: str
+    required: true
+  tag:
+    description:
+      - Tag for the image name O(name) that is to be tagged.
+      - If O(name)'s format is C(name:tag), then the tag value from O(name) will take precedence.
+    type: str
+    default: latest
+  path:
+    description:
+      - The path for the build environment.
+    type: path
+    required: true
+  dockerfile:
+    description:
+      - Provide an alternate name for the Dockerfile to use when building an image.
+      - This can also include a relative path (relative to O(path)).
+    type: str
+  cache_from:
+    description:
+      - List of image names to consider as cache source.
+    type: list
+    elements: str
+  pull:
+    description:
+      - When building an image, download any updates to the C(FROM) image in the Dockerfile.
+    type: bool
+    default: false
+  network:
+    description:
+      - The network to use for C(RUN) build instructions.
+    type: str
+  nocache:
+    description:
+      - Do not use cache when building an image.
+    type: bool
+    default: false
+  etc_hosts:
+    description:
+      - Extra hosts to add to C(/etc/hosts) in building containers, as a mapping of hostname to IP address.
+    type: dict
+  args:
+    description:
+      - Provide a dictionary of C(key:value) build arguments that map to the Dockerfile C(ARG) directive.
+      - Docker expects the value to be a string. For convenience any non-string values will be converted to strings.
+ type: dict + target: + description: + - When building an image specifies an intermediate build stage by + name as a final stage for the resulting image. + type: str + platform: + description: + - Platform in the format C(os[/arch[/variant]]). + type: str + shm_size: + description: + - "Size of C(/dev/shm) in format C(<number>[<unit>]). Number is positive integer. + Unit can be V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." + - Omitting the unit defaults to bytes. If you omit the size entirely, Docker daemon uses V(64M). + type: str + labels: + description: + - Dictionary of key value pairs. + type: dict + rebuild: + description: + - Defines the behavior of the module if the image to build (as specified in O(name) and O(tag)) already exists. + type: str + choices: + - never + - always + default: never + +requirements: + - "Docker CLI with Docker buildx plugin" + +author: + - Felix Fontein (@felixfontein) + +seealso: + - module: community.docker.docker_image_push + - module: community.docker.docker_image_tag +''' + +EXAMPLES = ''' +- name: Build Python 3.12 image + community.docker.docker_image_build: + name: localhost/python/3.12:latest + path: /home/user/images/python + dockerfile: Dockerfile-3.12 +''' + +RETURN = ''' +image: + description: Image inspection results for the affected image. 
+ returned: success + type: dict + sample: {} +''' + +import os +import traceback + +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.text.formatters import human_to_bytes + +from ansible_collections.community.docker.plugins.module_utils.common_cli import ( + AnsibleModuleDockerClient, + DockerException, +) + +from ansible_collections.community.docker.plugins.module_utils.util import ( + DockerBaseClass, + clean_dict_booleans_for_docker_api, + is_image_name_id, + is_valid_tag, +) + +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( + parse_repository_tag, +) + + +def convert_to_bytes(value, module, name, unlimited_value=None): + if value is None: + return value + try: + if unlimited_value is not None and value in ('unlimited', str(unlimited_value)): + return unlimited_value + return human_to_bytes(value) + except ValueError as exc: + module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) + + +def dict_to_list(dictionary, concat='='): + return ['%s%s%s' % (k, concat, v) for k, v in sorted(dictionary.items())] + + +class ImageBuilder(DockerBaseClass): + def __init__(self, client): + super(ImageBuilder, self).__init__() + self.client = client + self.check_mode = self.client.check_mode + parameters = self.client.module.params + + self.cache_from = parameters['cache_from'] + self.pull = parameters['pull'] + self.network = parameters['network'] + self.nocache = parameters['nocache'] + self.etc_hosts = clean_dict_booleans_for_docker_api(parameters['etc_hosts']) + self.args = clean_dict_booleans_for_docker_api(parameters['args']) + self.target = parameters['target'] + self.platform = parameters['platform'] + self.shm_size = convert_to_bytes(parameters['shm_size'], self.client.module, 'shm_size') + self.labels = clean_dict_booleans_for_docker_api(parameters['labels']) + self.rebuild = parameters['rebuild'] + + buildx = 
self.client.get_client_plugin_info('buildx') + if buildx is None: + self.fail('Docker CLI {0} does not have the buildx plugin installed'.format(self.client.get_cli())) + + self.path = parameters['path'] + if not os.path.isdir(self.path): + self.fail('"{0}" is not an existing directory'.format(self.path)) + self.dockerfile = parameters['dockerfile'] + if self.dockerfile and not os.path.isfile(os.path.join(self.path, self.dockerfile)): + self.fail('"{0}" is not an existing file'.format(os.path.join(self.path, self.dockerfile))) + + self.name = parameters['name'] + self.tag = parameters['tag'] + if not is_valid_tag(self.tag, allow_empty=True): + self.fail('"{0}" is not a valid docker tag'.format(self.tag)) + if is_image_name_id(self.name): + self.fail('Image name must not be a digest') + + # If name contains a tag, it takes precedence over tag parameter. + repo, repo_tag = parse_repository_tag(self.name) + if repo_tag: + self.name = repo + self.tag = repo_tag + + if is_image_name_id(self.tag): + self.fail('Image name must not contain a digest, but have a tag') + + def fail(self, msg, **kwargs): + self.client.fail(msg, **kwargs) + + def add_list_arg(self, args, option, values): + for value in values: + args.extend([option, value]) + + def add_args(self, args): + args.extend(['--tag', '%s:%s' % (self.name, self.tag)]) + if self.dockerfile: + args.extend(['--file', os.path.join(self.path, self.dockerfile)]) + if self.cache_from: + self.add_list_arg(args, '--cache-from', self.cache_from) + if self.pull: + args.append('--pull') + if self.network: + args.extend(['--network', self.network]) + if self.nocache: + args.append('--no-cache') + if self.etc_hosts: + self.add_list_arg(args, '--add-host', dict_to_list(self.etc_hosts, ':')) + if self.args: + self.add_list_arg(args, '--build-arg', dict_to_list(self.args)) + if self.target: + args.extend(['--target', self.target]) + if self.platform: + args.extend(['--platform', self.platform]) + if self.shm_size: + 
args.extend(['--shm-size', str(self.shm_size)]) + if self.labels: + self.add_list_arg(args, '--label', dict_to_list(self.labels)) + + def build_image(self): + image = self.client.find_image(self.name, self.tag) + results = dict( + changed=False, + actions=[], + image=image or {}, + ) + + if image: + if self.rebuild == 'never': + return results + + results['changed'] = True + if not self.check_mode: + args = ['buildx', 'build', '--progress', 'plain'] + self.add_args(args) + args.extend(['--', self.path]) + rc, stdout, stderr = self.client.call_cli(*args) + if rc != 0: + self.fail('Building %s:%s failed' % (self.name, self.tag), stdout=to_native(stdout), stderr=to_native(stderr)) + results['stdout'] = to_native(stdout) + results['stderr'] = to_native(stderr) + results['image'] = self.client.find_image(self.name, self.tag) or {} + + return results + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + tag=dict(type='str', default='latest'), + path=dict(type='path', required=True), + dockerfile=dict(type='str'), + cache_from=dict(type='list', elements='str'), + pull=dict(type='bool', default=False), + network=dict(type='str'), + nocache=dict(type='bool', default=False), + etc_hosts=dict(type='dict'), + args=dict(type='dict'), + target=dict(type='str'), + platform=dict(type='str'), + shm_size=dict(type='str'), + labels=dict(type='dict'), + rebuild=dict(type='str', choices=['never', 'always'], default='never'), + ) + + client = AnsibleModuleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + results = ImageBuilder(client).build_image() + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/docker/plugins/modules/docker_image_export.py 
b/ansible_collections/community/docker/plugins/modules/docker_image_export.py new file mode 100644 index 000000000..9e03875a2 --- /dev/null +++ b/ansible_collections/community/docker/plugins/modules/docker_image_export.py @@ -0,0 +1,283 @@ +#!/usr/bin/python +# +# Copyright (c) 2023, Felix Fontein <felix@fontein.de> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_image_export + +short_description: Export (archive) Docker images + +version_added: 3.7.0 + +description: + - Creates an archive (tarball) from one or more Docker images. + - This can be copied to another machine and loaded with M(community.docker.docker_image_load). + +extends_documentation_fragment: + - community.docker.docker.api_documentation + - community.docker.attributes + - community.docker.attributes.actiongroup_docker + +attributes: + check_mode: + support: full + diff_mode: + support: none + +options: + names: + description: + - "One or more image names. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). + When pushing or pulling an image the name can optionally include the tag by appending C(:tag_name)." + - Note that image IDs (hashes) can also be used. + type: list + elements: str + required: true + aliases: + - name + tag: + description: + - Tag for the image names O(names) that are to be exported. + - If a name in O(names) has the format C(name:tag), then the tag value from that name will take precedence. + type: str + default: latest + path: + description: + - The C(.tar) file the image should be exported to. + type: path + force: + description: + - Export the image even if the C(.tar) file already exists and seems to contain the right image.
+ type: bool + default: false + +requirements: + - "Docker API >= 1.25" + +author: + - Felix Fontein (@felixfontein) + +seealso: + - module: community.docker.docker_image + - module: community.docker.docker_image_info + - module: community.docker.docker_image_load +''' + +EXAMPLES = ''' +- name: Export an image + community.docker.docker_image_export: + name: pacur/centos-7 + path: /tmp/centos-7.tar + +- name: Export multiple images + community.docker.docker_image_export: + names: + - hello-world:latest + - pacur/centos-7:latest + path: /tmp/various.tar +''' + +RETURN = ''' +images: + description: Image inspection results for the affected images. + returned: success + type: list + elements: dict + sample: [] +''' + +import traceback + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.docker.plugins.module_utils.common_api import ( + AnsibleDockerClient, + RequestException, +) + +from ansible_collections.community.docker.plugins.module_utils.image_archive import ( + load_archived_image_manifest, + api_image_id, + ImageArchiveInvalidException, +) + +from ansible_collections.community.docker.plugins.module_utils.util import ( + DockerBaseClass, + is_image_name_id, + is_valid_tag, +) +from ansible_collections.community.docker.plugins.module_utils._api.constants import ( + DEFAULT_DATA_CHUNK_SIZE, +) +from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( + parse_repository_tag, +) + + +class ImageExportManager(DockerBaseClass): + def __init__(self, client): + super(ImageExportManager, self).__init__() + + self.client = client + parameters = self.client.module.params + self.check_mode = self.client.check_mode + + self.path = parameters['path'] + self.force = parameters['force'] + self.tag = parameters['tag'] + + if not is_valid_tag(self.tag, allow_empty=True): + self.fail('"{0}" is not 
a valid docker tag'.format(self.tag)) + + # If name contains a tag, it takes precedence over tag parameter. + self.names = [] + for name in parameters['names']: + if is_image_name_id(name): + self.names.append({'id': name, 'joined': name}) + else: + repo, repo_tag = parse_repository_tag(name) + if not repo_tag: + repo_tag = self.tag + self.names.append({'name': repo, 'tag': repo_tag, 'joined': '%s:%s' % (repo, repo_tag)}) + + if not self.names: + self.fail('At least one image name must be specified') + + def fail(self, msg): + self.client.fail(msg) + + def get_export_reason(self): + if self.force: + return 'Exporting since force=true' + + try: + archived_images = load_archived_image_manifest(self.path) + if archived_images is None: + return 'Overwriting since no image is present in archive' + except ImageArchiveInvalidException as exc: + self.log('Unable to extract manifest summary from archive: %s' % to_native(exc)) + return 'Overwriting an unreadable archive file' + + left_names = list(self.names) + for archived_image in archived_images: + found = False + for i, name in enumerate(left_names): + if name['id'] == api_image_id(archived_image.image_id) and [name['joined']] == archived_image.repo_tags: + del left_names[i] + found = True + break + if not found: + return 'Overwriting archive since it contains unexpected image %s named %s' % ( + archived_image.image_id, ', '.join(archived_image.repo_tags) + ) + if left_names: + return 'Overwriting archive since it is missing image(s) %s' % (', '.join([name['joined'] for name in left_names])) + + return None + + def write_chunks(self, chunks): + try: + with open(self.path, 'wb') as fd: + for chunk in chunks: + fd.write(chunk) + except Exception as exc: + self.fail("Error writing image archive %s - %s" % (self.path, to_native(exc))) + + def export_images(self): + image_names = [name['joined'] for name in self.names] + image_names_str = ', '.join(image_names) + if len(image_names) == 1: + self.log("Getting archive of image 
%s" % image_names[0]) + try: + chunks = self.client._stream_raw_result( + self.client._get(self.client._url('/images/{0}/get', image_names[0]), stream=True), + DEFAULT_DATA_CHUNK_SIZE, + False, + ) + except Exception as exc: + self.fail("Error getting image %s - %s" % (image_names[0], to_native(exc))) + else: + self.log("Getting archive of images %s" % image_names_str) + try: + chunks = self.client._stream_raw_result( + self.client._get( + self.client._url('/images/get'), + stream=True, + params={'names': image_names}, + ), + DEFAULT_DATA_CHUNK_SIZE, + False, + ) + except Exception as exc: + self.fail("Error getting images %s - %s" % (image_names_str, to_native(exc))) + + self.write_chunks(chunks) + + def run(self): + tag = self.tag + if not tag: + tag = "latest" + + images = [] + for name in self.names: + if 'id' in name: + image = self.client.find_image_by_id(name['id'], accept_missing_image=True) + else: + image = self.client.find_image(name=name['name'], tag=name['tag']) + if not image: + self.fail("Image %s not found" % name['joined']) + images.append(image) + + # Will have a 'sha256:' prefix + name['id'] = image['Id'] + + results = { + 'changed': False, + 'images': images, + } + + reason = self.get_export_reason() + if reason is not None: + results['msg'] = reason + results['changed'] = True + + if not self.check_mode: + self.export_images() + + return results + + +def main(): + argument_spec = dict( + path=dict(type='path'), + force=dict(type='bool', default=False), + names=dict(type='list', elements='str', required=True, aliases=['name']), + tag=dict(type='str', default='latest'), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + results = ImageExportManager(client).run() + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + except RequestException as e: + client.fail( 
+ 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/docker/plugins/modules/docker_image_info.py b/ansible_collections/community/docker/plugins/modules/docker_image_info.py index e4f480b1c..2f441672f 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_image_info.py +++ b/ansible_collections/community/docker/plugins/modules/docker_image_info.py @@ -19,8 +19,8 @@ description: - If an image does not exist locally, it will not appear in the results. If you want to check whether an image exists locally, you can call the module with the image name, then check whether the result list is empty (image does not exist) or has one element (the image exists locally). - - The module will not attempt to pull images from registries. Use M(community.docker.docker_image) with I(source) set to C(pull) - to ensure an image is pulled. + - The module will not attempt to pull images from registries. Use M(community.docker.docker_image) with + O(community.docker.docker_image#module:source=pull) to ensure an image is pulled. notes: - This module was called C(docker_image_facts) before Ansible 2.8. The usage did not change. @@ -35,7 +35,7 @@ options: name: description: - An image name or a list of image names. Name format will be C(name[:tag]) or C(repository/name[:tag]), - where C(tag) is optional. If a tag is not provided, C(latest) will be used. Instead of image names, also + where C(tag) is optional. If a tag is not provided, V(latest) will be used. Instead of image names, also image IDs can be used. - If no name is provided, a list of all images will be returned. 
type: list diff --git a/ansible_collections/community/docker/plugins/modules/docker_image_load.py b/ansible_collections/community/docker/plugins/modules/docker_image_load.py index 880ae4e4c..ec628d1ba 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_image_load.py +++ b/ansible_collections/community/docker/plugins/modules/docker_image_load.py @@ -39,14 +39,17 @@ options: type: path required: true -notes: - - Does not support C(check_mode). - requirements: - "Docker API >= 1.25" author: - Felix Fontein (@felixfontein) + +seealso: + - module: community.docker.docker_image_export + - module: community.docker.docker_image_push + - module: community.docker.docker_image_remove + - module: community.docker.docker_image_tag ''' EXAMPLES = ''' diff --git a/ansible_collections/community/docker/plugins/modules/docker_image_pull.py b/ansible_collections/community/docker/plugins/modules/docker_image_pull.py new file mode 100644 index 000000000..a70942b3f --- /dev/null +++ b/ansible_collections/community/docker/plugins/modules/docker_image_pull.py @@ -0,0 +1,223 @@ +#!/usr/bin/python +# +# Copyright (c) 2023, Felix Fontein <felix@fontein.de> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_image_pull + +short_description: Pull Docker images from registries + +version_added: 3.6.0 + +description: + - Pulls a Docker image from a registry. + +extends_documentation_fragment: + - community.docker.docker.api_documentation + - community.docker.attributes + - community.docker.attributes.actiongroup_docker + +attributes: + check_mode: + support: partial + details: + - When trying to pull an image with O(pull=always), the module assumes this is always changed in check mode. 
+ - When check mode is combined with diff mode, the pulled image's ID is always shown as V(unknown) in the diff. + diff_mode: + support: full + +options: + name: + description: + - "Image name. Name format must be one of V(name), V(repository/name), or V(registry_server:port/name)." + - "The name can optionally include the tag by appending V(:tag_name), or it can contain a digest by appending V(@hash:digest)." + type: str + required: true + tag: + description: + - Used to select an image when pulling. Defaults to V(latest). + - If O(name) parameter format is C(name:tag) or C(image@hash:digest), then O(tag) will be ignored. + type: str + default: latest + platform: + description: + - Ask for this specific platform when pulling. + type: str + pull: + description: + - Determines when to pull an image. + - If V(always), will always pull the image. + - If V(not_present), will only pull the image if no image of the name exists on the current Docker daemon, + or if O(platform) does not match. + type: str + choices: + - always + - not_present + default: always + +requirements: + - "Docker API >= 1.25" + +author: + - Felix Fontein (@felixfontein) + +seealso: + - module: community.docker.docker_image_push + - module: community.docker.docker_image_remove + - module: community.docker.docker_image_tag +''' + +EXAMPLES = ''' +- name: Pull an image + community.docker.docker_image_pull: + name: pacur/centos-7 + # Select platform for pulling. If not specified, will pull whatever docker prefers. + platform: amd64 +''' + +RETURN = ''' +image: + description: Image inspection results for the affected image.
+ returned: success + type: dict + sample: {} +''' + +import traceback + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.docker.plugins.module_utils.common_api import ( + AnsibleDockerClient, + RequestException, +) + +from ansible_collections.community.docker.plugins.module_utils.util import ( + DockerBaseClass, + is_image_name_id, + is_valid_tag, +) + +from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( + parse_repository_tag, +) + +from ansible_collections.community.docker.plugins.module_utils._platform import ( + normalize_platform_string, + compare_platform_strings, + compose_platform_string, +) + + +def image_info(image): + result = {} + if image: + result['id'] = image['Id'] + else: + result['exists'] = False + return result + + +class ImagePuller(DockerBaseClass): + def __init__(self, client): + super(ImagePuller, self).__init__() + + self.client = client + self.check_mode = self.client.check_mode + + parameters = self.client.module.params + self.name = parameters['name'] + self.tag = parameters['tag'] + self.platform = parameters['platform'] + self.pull_mode = parameters['pull'] + + if is_image_name_id(self.name): + self.client.fail("Cannot pull an image by ID") + if not is_valid_tag(self.tag, allow_empty=True): + self.client.fail('"{0}" is not a valid docker tag!'.format(self.tag)) + + # If name contains a tag, it takes precedence over tag parameter. 
+ repo, repo_tag = parse_repository_tag(self.name) + if repo_tag: + self.name = repo + self.tag = repo_tag + + def pull(self): + image = self.client.find_image(name=self.name, tag=self.tag) + results = dict( + changed=False, + actions=[], + image=image or {}, + diff=dict(before=image_info(image), after=image_info(image)), + ) + + if image and self.pull_mode == 'not_present': + if self.platform is None: + return results + host_info = self.client.info() + wanted_platform = normalize_platform_string( + self.platform, + daemon_os=host_info.get('OSType'), + daemon_arch=host_info.get('Architecture'), + ) + image_platform = compose_platform_string( + os=image.get('Os'), + arch=image.get('Architecture'), + variant=image.get('Variant'), + daemon_os=host_info.get('OSType'), + daemon_arch=host_info.get('Architecture'), + ) + if compare_platform_strings(wanted_platform, image_platform): + return results + + results['actions'].append('Pulled image %s:%s' % (self.name, self.tag)) + if self.check_mode: + results['changed'] = True + results['diff']['after'] = image_info(dict(Id='unknown')) + else: + results['image'], not_changed = self.client.pull_image(self.name, tag=self.tag, platform=self.platform) + results['changed'] = not not_changed + results['diff']['after'] = image_info(results['image']) + + return results + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + tag=dict(type='str', default='latest'), + platform=dict(type='str'), + pull=dict(type='str', choices=['always', 'not_present'], default='always'), + ) + + option_minimal_versions = dict( + platform=dict(docker_api_version='1.32'), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + option_minimal_versions=option_minimal_versions, + ) + + try: + results = ImagePuller(client).pull() + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), 
exception=traceback.format_exc()) + except RequestException as e: + client.fail( + 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/docker/plugins/modules/docker_image_push.py b/ansible_collections/community/docker/plugins/modules/docker_image_push.py new file mode 100644 index 000000000..f1474d838 --- /dev/null +++ b/ansible_collections/community/docker/plugins/modules/docker_image_push.py @@ -0,0 +1,197 @@ +#!/usr/bin/python +# +# Copyright (c) 2023, Felix Fontein <felix@fontein.de> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_image_push + +short_description: Push Docker images to registries + +version_added: 3.6.0 + +description: + - Pushes a Docker image to a registry. + +extends_documentation_fragment: + - community.docker.docker.api_documentation + - community.docker.attributes + - community.docker.attributes.actiongroup_docker + +attributes: + check_mode: + support: none + diff_mode: + support: none + +options: + name: + description: + - "Image name. Name format must be one of V(name), V(repository/name), or V(registry_server:port/name)." + - "The name can optionally include the tag by appending V(:tag_name), or it can contain a digest by appending V(@hash:digest)." + type: str + required: true + tag: + description: + - Select which image to push. Defaults to V(latest). + - If O(name) parameter format is C(name:tag) or C(image@hash:digest), then O(tag) will be ignored.
+ type: str + default: latest + +requirements: + - "Docker API >= 1.25" + +author: + - Felix Fontein (@felixfontein) + +seealso: + - module: community.docker.docker_image_pull + - module: community.docker.docker_image_remove + - module: community.docker.docker_image_tag +''' + +EXAMPLES = ''' +- name: Push an image + community.docker.docker_image_push: + name: registry.example.com:5000/repo/image + tag: latest +''' + +RETURN = ''' +image: + description: Image inspection results for the affected image. + returned: success + type: dict + sample: {} +''' + +import traceback + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.docker.plugins.module_utils.common_api import ( + AnsibleDockerClient, + RequestException, +) + +from ansible_collections.community.docker.plugins.module_utils.util import ( + DockerBaseClass, + is_image_name_id, + is_valid_tag, +) + +from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( + parse_repository_tag, +) + +from ansible_collections.community.docker.plugins.module_utils._api.auth import ( + get_config_header, + resolve_repository_name, +) + + +class ImagePusher(DockerBaseClass): + def __init__(self, client): + super(ImagePusher, self).__init__() + + self.client = client + self.check_mode = self.client.check_mode + + parameters = self.client.module.params + self.name = parameters['name'] + self.tag = parameters['tag'] + + if is_image_name_id(self.name): + self.client.fail("Cannot push an image by ID") + if not is_valid_tag(self.tag, allow_empty=True): + self.client.fail('"{0}" is not a valid docker tag!'.format(self.tag)) + + # If name contains a tag, it takes precedence over tag parameter. 
+ repo, repo_tag = parse_repository_tag(self.name) + if repo_tag: + self.name = repo + self.tag = repo_tag + + if is_image_name_id(self.tag): + self.client.fail("Cannot push an image by digest") + if not is_valid_tag(self.tag, allow_empty=False): + self.client.fail('"{0}" is not a valid docker tag!'.format(self.tag)) + + def push(self): + image = self.client.find_image(name=self.name, tag=self.tag) + if not image: + self.client.fail('Cannot find image %s:%s' % (self.name, self.tag)) + + results = dict( + changed=False, + actions=[], + image=image, + ) + + push_registry, push_repo = resolve_repository_name(self.name) + try: + results['actions'].append('Pushed image %s:%s' % (self.name, self.tag)) + + headers = {} + header = get_config_header(self.client, push_registry) + if header: + headers['X-Registry-Auth'] = header + response = self.client._post_json( + self.client._url("/images/{0}/push", self.name), + data=None, + headers=headers, + stream=True, + params={'tag': self.tag}, + ) + self.client._raise_for_status(response) + for line in self.client._stream_helper(response, decode=True): + self.log(line, pretty_print=True) + if line.get('errorDetail'): + raise Exception(line['errorDetail']['message']) + status = line.get('status') + if status == 'Pushing': + results['changed'] = True + except Exception as exc: + if 'unauthorized' in str(exc): + if 'authentication required' in str(exc): + self.client.fail("Error pushing image %s/%s:%s - %s. Try logging into %s first." % + (push_registry, push_repo, self.tag, to_native(exc), push_registry)) + else: + self.client.fail("Error pushing image %s/%s:%s - %s. Does the repository exist?" 
% + (push_registry, push_repo, self.tag, str(exc))) + self.client.fail("Error pushing image %s:%s: %s" % (self.name, self.tag, to_native(exc))) + + return results + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + tag=dict(type='str', default='latest'), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=False, + ) + + try: + results = ImagePusher(client).push() + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) + except RequestException as e: + client.fail( + 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/docker/plugins/modules/docker_image_remove.py b/ansible_collections/community/docker/plugins/modules/docker_image_remove.py new file mode 100644 index 000000000..c8ea326b2 --- /dev/null +++ b/ansible_collections/community/docker/plugins/modules/docker_image_remove.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# +# Copyright 2016 Red Hat | Ansible +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_image_remove + +short_description: Remove Docker images + +version_added: 3.6.0 + +description: + - Remove Docker images from the Docker daemon. + +extends_documentation_fragment: + - community.docker.docker.api_documentation + - community.docker.attributes + - community.docker.attributes.actiongroup_docker + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + name: + description: + - "Image name. 
Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). + When pushing or pulling an image the name can optionally include the tag by appending C(:tag_name)." + - Note that image IDs (hashes) can also be used. + type: str + required: true + tag: + description: + - Tag for the image name O(name) that is to be removed. + - If O(name)'s format is C(name:tag), then the tag value from O(name) will take precedence. + type: str + default: latest + force: + description: + - Un-tag and remove all images matching the specified name. + type: bool + default: false + prune: + description: + - Delete untagged parent images. + type: bool + default: true + +requirements: + - "Docker API >= 1.25" + +author: + - Felix Fontein (@felixfontein) + +seealso: + - module: community.docker.docker_image_load + - module: community.docker.docker_image_pull + - module: community.docker.docker_image_tag +''' + +EXAMPLES = ''' + +- name: Remove an image + community.docker.docker_image_remove: + name: pacur/centos-7 +''' + +RETURN = ''' +image: + description: + - Image inspection results for the affected image before removal. + - Empty if the image was not found. + returned: success + type: dict + sample: {} +deleted: + description: + - The digests of the images that were deleted. + returned: success + type: list + elements: str + sample: [] +untagged: + description: + - The digests of the images that were untagged.
import traceback

from ansible.module_utils.common.text.converters import to_native

from ansible_collections.community.docker.plugins.module_utils.common_api import (
    AnsibleDockerClient,
    RequestException,
)

from ansible_collections.community.docker.plugins.module_utils.util import (
    DockerBaseClass,
    is_image_name_id,
    is_valid_tag,
)

from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException, NotFound
from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import (
    parse_repository_tag,
)


class ImageRemover(DockerBaseClass):
    """Implement the docker_image_remove module: make sure an image is absent.

    The image can be addressed either by ID/digest-like value or by
    ``name[:tag]``; the work happens in :meth:`absent`.
    """

    def __init__(self, client):
        """Read and normalize module parameters from ``client``.

        Fails early if the tag is syntactically invalid. If O(name) itself
        contains a tag (``repo:tag``), that tag overrides the O(tag) option.
        """
        super(ImageRemover, self).__init__()

        self.client = client
        self.check_mode = self.client.check_mode
        # Whether --diff was requested; controls the results['diff'] entries below.
        self.diff = self.client.module._diff

        parameters = self.client.module.params
        self.name = parameters['name']
        self.tag = parameters['tag']
        self.force = parameters['force']
        self.prune = parameters['prune']

        if not is_valid_tag(self.tag, allow_empty=True):
            self.fail('"{0}" is not a valid docker tag'.format(self.tag))

        # If name contains a tag, it takes precedence over tag parameter.
        if not is_image_name_id(self.name):
            repo, repo_tag = parse_repository_tag(self.name)
            if repo_tag:
                self.name = repo
                self.tag = repo_tag

    def fail(self, msg):
        """Abort the module run with the given error message."""
        self.client.fail(msg)

    def get_diff_state(self, image):
        """Return a normalized dict describing ``image`` for --diff output.

        A falsy ``image`` (not found) is represented as ``{'exists': False}``;
        tags and digests are sorted so diffs are stable.
        """
        if not image:
            return dict(exists=False)
        return dict(
            exists=True,
            id=image['Id'],
            tags=sorted(image.get('RepoTags') or []),
            digests=sorted(image.get('RepoDigests') or []),
        )

    def absent(self):
        """Ensure the image is absent; return the module result dict.

        Outside check mode the image is removed through the Docker API and the
        daemon's response is used to fill ``untagged``/``deleted``. In check
        mode the elif branches below predict the outcome instead.
        """
        results = dict(
            changed=False,
            actions=[],
            image={},
            deleted=[],
            untagged=[],
        )

        name = self.name
        if is_image_name_id(name):
            # Referenced by ID: look it up directly; a missing image is fine here.
            image = self.client.find_image_by_id(name, accept_missing_image=True)
        else:
            image = self.client.find_image(name, self.tag)
            if self.tag:
                # Use the fully qualified name:tag for the API call and messages.
                name = "%s:%s" % (self.name, self.tag)

        if self.diff:
            results['diff'] = dict(before=self.get_diff_state(image))

        if not image:
            # Nothing to do; report the (identical) after state for --diff.
            if self.diff:
                results['diff']['after'] = self.get_diff_state(image)
            return results

        results['changed'] = True
        results['actions'].append("Removed image %s" % (name))
        results['image'] = image

        if not self.check_mode:
            # Real removal: DELETE /images/{name} with force/noprune query params.
            try:
                res = self.client.delete_json('/images/{0}', name, params={'force': self.force, 'noprune': not self.prune})
            except NotFound:
                # If the image vanished while we were trying to remove it, don't fail
                res = []
            except Exception as exc:
                self.fail("Error removing image %s - %s" % (name, to_native(exc)))

            # The daemon reports what actually happened as a list of
            # {'Untagged': ...} / {'Deleted': ...} entries.
            for entry in res:
                if entry.get('Untagged'):
                    results['untagged'].append(entry['Untagged'])
                if entry.get('Deleted'):
                    results['deleted'].append(entry['Deleted'])

            results['untagged'] = sorted(results['untagged'])
            results['deleted'] = sorted(results['deleted'])

            if self.diff:
                # Re-inspect by ID: the image may still exist under other names.
                image_after = self.client.find_image_by_id(image['Id'], accept_missing_image=True)
                results['diff']['after'] = self.get_diff_state(image_after)

        elif is_image_name_id(name):
            # Check mode, image referenced by ID: removal would untag every
            # name and digest and delete the image itself.
            results['deleted'].append(image['Id'])
            results['untagged'] = sorted((image.get('RepoTags') or []) + (image.get('RepoDigests') or []))
            if not self.force and results['untagged']:
                self.fail('Cannot delete image by ID that is still in use - use force=true')
            if self.diff:
                results['diff']['after'] = self.get_diff_state({})

        elif is_image_name_id(self.tag):
            # Check mode, referenced as name@digest (the parsed "tag" is a
            # digest-like value — presumably from parse_repository_tag; the
            # image is only fully deleted if no tags and no other digest remain.
            results['untagged'].append(name)
            if len(image.get('RepoTags') or []) < 1 and len(image.get('RepoDigests') or []) < 2:
                results['deleted'].append(image['Id'])
            if self.diff:
                results['diff']['after'] = self.get_diff_state(image)
                try:
                    results['diff']['after']['digests'].remove(name)
                except ValueError:
                    pass

        else:
            # Check mode, referenced as name:tag: the image is only fully
            # deleted if this was its last tag and it has no digests.
            results['untagged'].append(name)
            if len(image.get('RepoTags') or []) < 2 and len(image.get('RepoDigests') or []) < 1:
                results['deleted'].append(image['Id'])
            if self.diff:
                results['diff']['after'] = self.get_diff_state(image)
                try:
                    results['diff']['after']['tags'].remove(name)
                except ValueError:
                    pass

        return results


def main():
    """Module entry point: parse arguments, run ImageRemover, exit with results."""
    argument_spec = dict(
        name=dict(type='str', required=True),
        tag=dict(type='str', default='latest'),
        force=dict(type='bool', default=False),
        prune=dict(type='bool', default=True),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        supports_check_mode=True,
    )

    try:
        results = ImageRemover(client).absent()
        client.module.exit_json(**results)
    except DockerException as e:
        client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)),
            exception=traceback.format_exc())


if __name__ == '__main__':
    main()
b/ansible_collections/community/docker/plugins/modules/docker_image_tag.py @@ -0,0 +1,273 @@ +#!/usr/bin/python +# +# Copyright (c) 2023, Felix Fontein <felix@fontein.de> +# GNU General Public License v3.0+ (see LICENSES/GPL-3.0-or-later.txt or https://www.gnu.org/licenses/gpl-3.0.txt) +# SPDX-License-Identifier: GPL-3.0-or-later + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + + +DOCUMENTATION = ''' +--- +module: docker_image_tag + +short_description: Tag Docker images with new names and/or tags + +version_added: 3.6.0 + +description: + - This module allows to tag Docker images with new names and/or tags. + +extends_documentation_fragment: + - community.docker.docker.api_documentation + - community.docker.attributes + - community.docker.attributes.actiongroup_docker + +attributes: + check_mode: + support: full + diff_mode: + support: full + +options: + name: + description: + - "Image name. Name format will be one of: C(name), C(repository/name), C(registry_server:port/name). + When pushing or pulling an image the name can optionally include the tag by appending C(:tag_name)." + - Note that image IDs (hashes) can also be used. + type: str + required: true + tag: + description: + - Tag for the image name O(name) that is to be tagged. + - If O(name)'s format is C(name:tag), then the tag value from O(name) will take precedence. + type: str + default: latest + repository: + description: + - List of new image names to tag the image as. + - Expects format C(repository:tag). If no tag is provided, will use the value of the O(tag) parameter if present, or V(latest). + type: list + elements: str + required: true + existing_images: + description: + - Defines the behavior if the image to be tagged already exists and is another image than the one identified by O(name) and O(tag). + - If set to V(keep), the tagged image is kept. + - If set to V(overwrite), the tagged image is overwritten by the specified one. 
+ type: str + choices: + - keep + - overwrite + default: overwrite + +requirements: + - "Docker API >= 1.25" + +author: + - Felix Fontein (@felixfontein) + +seealso: + - module: community.docker.docker_image_push + - module: community.docker.docker_image_remove +''' + +EXAMPLES = ''' +- name: Tag Python 3.12 image with two new names + community.docker.docker_image_tag: + name: python:3.12 + repository: + - python-3:3.12 + - local-registry:5000/python-3/3.12:latest +''' + +RETURN = ''' +image: + description: Image inspection results for the affected image. + returned: success + type: dict + sample: {} +tagged_images: + description: + - A list of images that got tagged. + returned: success + type: list + elements: str + sample: + - python-3:3.12 +''' + +import traceback + +from ansible.module_utils.common.text.converters import to_native +from ansible.module_utils.common.text.formatters import human_to_bytes + +from ansible_collections.community.docker.plugins.module_utils.common_api import ( + AnsibleDockerClient, + RequestException, +) + +from ansible_collections.community.docker.plugins.module_utils.util import ( + DockerBaseClass, + is_image_name_id, + is_valid_tag, +) + +from ansible_collections.community.docker.plugins.module_utils._api.errors import DockerException +from ansible_collections.community.docker.plugins.module_utils._api.utils.utils import ( + parse_repository_tag, +) + + +def convert_to_bytes(value, module, name, unlimited_value=None): + if value is None: + return value + try: + if unlimited_value is not None and value in ('unlimited', str(unlimited_value)): + return unlimited_value + return human_to_bytes(value) + except ValueError as exc: + module.fail_json(msg='Failed to convert %s to bytes: %s' % (name, to_native(exc))) + + +def image_info(name, tag, image): + result = dict(name=name, tag=tag) + if image: + result['id'] = image['Id'] + else: + result['exists'] = False + return result + + +class ImageTagger(DockerBaseClass): + def 
__init__(self, client): + super(ImageTagger, self).__init__() + + self.client = client + parameters = self.client.module.params + self.check_mode = self.client.check_mode + + self.name = parameters['name'] + self.tag = parameters['tag'] + if not is_valid_tag(self.tag, allow_empty=True): + self.fail('"{0}" is not a valid docker tag'.format(self.tag)) + + # If name contains a tag, it takes precedence over tag parameter. + if not is_image_name_id(self.name): + repo, repo_tag = parse_repository_tag(self.name) + if repo_tag: + self.name = repo + self.tag = repo_tag + + self.keep_existing_images = parameters['existing_images'] == 'keep' + + # Make sure names in repository are valid images, and add tag if needed + self.repositories = [] + for i, repository in enumerate(parameters['repository']): + if is_image_name_id(repository): + self.fail("repository[%d] must not be an image ID; got: %s" % (i + 1, repository)) + repo, repo_tag = parse_repository_tag(repository) + if not repo_tag: + repo_tag = parameters['tag'] + elif not is_valid_tag(repo_tag, allow_empty=False): + self.fail("repository[%d] must not have a digest; got: %s" % (i + 1, repository)) + self.repositories.append((repo, repo_tag)) + + def fail(self, msg): + self.client.fail(msg) + + def tag_image(self, image, name, tag): + tagged_image = self.client.find_image(name=name, tag=tag) + if tagged_image: + # Idempotency checks + if tagged_image['Id'] == image['Id']: + return ( + False, + "target image already exists (%s) and is as expected" % tagged_image['Id'], + tagged_image, + ) + if self.keep_existing_images: + return ( + False, + "target image already exists (%s) and is not as expected, but kept" % tagged_image['Id'], + tagged_image, + ) + msg = "target image existed (%s) and was not as expected" % tagged_image['Id'] + else: + msg = "target image did not exist" + + if not self.check_mode: + try: + params = { + 'tag': tag, + 'repo': name, + 'force': True, + } + res = 
self.client._post(self.client._url('/images/{0}/tag', image['Id']), params=params) + self.client._raise_for_status(res) + if res.status_code != 201: + raise Exception("Tag operation failed.") + except Exception as exc: + self.fail("Error: failed to tag image as %s:%s - %s" % (name, tag, to_native(exc))) + + return True, msg, tagged_image + + def tag_images(self): + if is_image_name_id(self.name): + image = self.client.find_image_by_id(self.name, accept_missing_image=False) + else: + image = self.client.find_image(name=self.name, tag=self.tag) + if not image: + self.fail("Cannot find image %s:%s" % (self.name, self.tag)) + + before = [] + after = [] + tagged_images = [] + results = dict( + changed=False, + actions=[], + image=image, + tagged_images=tagged_images, + diff=dict(before=dict(images=before), after=dict(images=after)), + ) + for repository, tag in self.repositories: + tagged, msg, old_image = self.tag_image(image, repository, tag) + before.append(image_info(repository, tag, old_image)) + after.append(image_info(repository, tag, image if tagged else old_image)) + if tagged: + results['changed'] = True + results['actions'].append('Tagged image %s as %s:%s: %s' % (image['Id'], repository, tag, msg)) + tagged_images.append('%s:%s' % (repository, tag)) + else: + results['actions'].append('Not tagged image %s as %s:%s: %s' % (image['Id'], repository, tag, msg)) + + return results + + +def main(): + argument_spec = dict( + name=dict(type='str', required=True), + tag=dict(type='str', default='latest'), + repository=dict(type='list', elements='str', required=True), + existing_images=dict(type='str', choices=['keep', 'overwrite'], default='overwrite'), + ) + + client = AnsibleDockerClient( + argument_spec=argument_spec, + supports_check_mode=True, + ) + + try: + results = ImageTagger(client).tag_images() + client.module.exit_json(**results) + except DockerException as e: + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), 
exception=traceback.format_exc()) + except RequestException as e: + client.fail( + 'An unexpected requests error occurred when trying to talk to the Docker daemon: {0}'.format(to_native(e)), + exception=traceback.format_exc()) + + +if __name__ == '__main__': + main() diff --git a/ansible_collections/community/docker/plugins/modules/docker_login.py b/ansible_collections/community/docker/plugins/modules/docker_login.py index 360dd5785..bb4e00b87 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_login.py +++ b/ansible_collections/community/docker/plugins/modules/docker_login.py @@ -46,12 +46,12 @@ options: username: description: - The username for the registry account. - - Required when I(state) is C(present). + - Required when O(state=present). type: str password: description: - The plaintext password for the registry account. - - Required when I(state) is C(present). + - Required when O(state=present). type: str reauthorize: description: @@ -69,7 +69,7 @@ options: - dockercfg_path state: description: - - This controls the current state of the user. C(present) will login in a user, C(absent) will log them out. + - This controls the current state of the user. V(present) will login in a user, V(absent) will log them out. - To logout you only need the registry server, which defaults to DockerHub. - Before 2.1 you could ONLY log in. - Docker does not support 'logout' with a custom config file. @@ -112,7 +112,7 @@ EXAMPLES = ''' RETURN = ''' login_results: description: Results from the login. - returned: when I(state=present) + returned: when O(state=present) type: dict sample: { "serveraddress": "localhost:5000", @@ -261,7 +261,7 @@ class LoginManager(DockerBaseClass): def run(self): ''' - Do the actuall work of this task here. This allows instantiation for partial + Do the actual work of this task here. This allows instantiation for partial testing. 
''' diff --git a/ansible_collections/community/docker/plugins/modules/docker_network.py b/ansible_collections/community/docker/plugins/modules/docker_network.py index db9323636..5670ceea0 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_network.py +++ b/ansible_collections/community/docker/plugins/modules/docker_network.py @@ -61,8 +61,8 @@ options: force: description: - - With state C(absent) forces disconnecting all containers from the - network prior to deleting the network. With state C(present) will + - With state V(absent) forces disconnecting all containers from the + network prior to deleting the network. With state V(present) will disconnect all containers, delete the network and re-create the network. - This option is required if you have changed the IPAM or driver options @@ -73,7 +73,7 @@ options: appends: description: - By default the connected list is canonical, meaning containers not on the list are removed from the network. - - Use I(appends) to leave existing containers connected. + - Use O(appends) to leave existing containers connected. type: bool default: false aliases: @@ -98,7 +98,7 @@ options: description: - List of IPAM config blocks. Consult L(Docker docs,https://docs.docker.com/compose/compose-file/compose-file-v2/#ipam) for valid options and values. - Note that I(iprange) is spelled differently here (we use the notation from the Docker SDK for Python). + Note that O(ipam_config[].iprange) is spelled differently here (we use the notation from the Docker SDK for Python). type: list elements: dict suboptions: @@ -121,14 +121,14 @@ options: state: description: - - C(absent) deletes the network. If a network has connected containers, it - cannot be deleted. Use the I(force) option to disconnect all containers + - V(absent) deletes the network. If a network has connected containers, it + cannot be deleted. Use the O(force) option to disconnect all containers and delete the network. 
- - C(present) creates the network, if it does not already exist with the + - V(present) creates the network, if it does not already exist with the specified parameters, and connects the list of containers provided via the connected parameter. Containers not on the list will be disconnected. An empty list will leave no containers connected to the network. Use the - I(appends) option to leave existing containers connected. Use the I(force) + O(appends) option to leave existing containers connected. Use the O(force) options to force re-creation of the network. type: str default: present @@ -163,7 +163,7 @@ options: notes: - When network options are changed, the module disconnects all containers from the network, deletes the network, and re-creates the network. - It does not try to reconnect containers, except the ones listed in (I(connected), and even for these, it does not consider specific + It does not try to reconnect containers, except the ones listed in O(connected), and even for these, it does not consider specific connection options like fixed IP addresses or MAC addresses. If you need more control over how the containers are connected to the network, loop the M(community.docker.docker_container) module to loop over your containers to make sure they are connected properly. - The module does not support Docker Swarm. This means that it will not try to disconnect or reconnect services. If services are connected to the diff --git a/ansible_collections/community/docker/plugins/modules/docker_network_info.py b/ansible_collections/community/docker/plugins/modules/docker_network_info.py index 9818baad5..c2c445bd1 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_network_info.py +++ b/ansible_collections/community/docker/plugins/modules/docker_network_info.py @@ -66,7 +66,7 @@ exists: network: description: - Facts representing the current state of the network. Matches the docker inspection output. - - Will be C(none) if network does not exist.
+ - Will be V(none) if network does not exist. returned: always type: dict sample: { diff --git a/ansible_collections/community/docker/plugins/modules/docker_node.py b/ansible_collections/community/docker/plugins/modules/docker_node.py index d097b07f7..bfa369e98 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_node.py +++ b/ansible_collections/community/docker/plugins/modules/docker_node.py @@ -39,19 +39,19 @@ options: labels: description: - User-defined key/value metadata that will be assigned as node attribute. - - Label operations in this module apply to the docker swarm node specified by I(hostname). + - Label operations in this module apply to the docker swarm node specified by O(hostname). Use M(community.docker.docker_swarm) module to add/modify/remove swarm cluster labels. - The actual state of labels assigned to the node when module completes its work depends on - I(labels_state) and I(labels_to_remove) parameters values. See description below. + O(labels_state) and O(labels_to_remove) parameters values. See description below. type: dict labels_state: description: - - It defines the operation on the labels assigned to node and labels specified in I(labels) option. - - Set to C(merge) to combine labels provided in I(labels) with those already assigned to the node. + - It defines the operation on the labels assigned to node and labels specified in O(labels) option. + - Set to V(merge) to combine labels provided in O(labels) with those already assigned to the node. If no labels are assigned then it will add listed labels. For labels that are already assigned - to the node, it will update their values. The labels not specified in I(labels) will remain unchanged. - If I(labels) is empty then no changes will be made. - - Set to C(replace) to replace all assigned labels with provided ones. If I(labels) is empty then + to the node, it will update their values. The labels not specified in O(labels) will remain unchanged. 
+ If O(labels) is empty then no changes will be made. + - Set to V(replace) to replace all assigned labels with provided ones. If O(labels) is empty then all labels assigned to the node will be removed. type: str default: 'merge' @@ -63,10 +63,10 @@ options: - List of labels that will be removed from the node configuration. The list has to contain only label names, not their values. - If the label provided on the list is not assigned to the node, the entry is ignored. - - If the label is both on the I(labels_to_remove) and I(labels), then value provided in I(labels) remains + - If the label is both on the O(labels_to_remove) and O(labels), then value provided in O(labels) remains assigned to the node. - - If I(labels_state) is C(replace) and I(labels) is not provided or empty then all labels assigned to - node are removed and I(labels_to_remove) is ignored. + - If O(labels_state=replace) and O(labels) is not provided or empty then all labels assigned to + node are removed and O(labels_to_remove) is ignored. type: list elements: str availability: diff --git a/ansible_collections/community/docker/plugins/modules/docker_node_info.py b/ansible_collections/community/docker/plugins/modules/docker_node_info.py index d943db31b..c64de0f8d 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_node_info.py +++ b/ansible_collections/community/docker/plugins/modules/docker_node_info.py @@ -33,14 +33,14 @@ options: - The list of nodes names to inspect. - If empty then return information of all nodes in Swarm cluster. - When identifying the node use either the hostname of the node (as registered in Swarm) or node ID. - - If I(self) is C(true) then this parameter is ignored. + - If O(self=true) then this parameter is ignored. type: list elements: str self: description: - - If C(true), queries the node (that is, the docker daemon) the module communicates with. - - If C(true) then I(name) is ignored. 
- - If C(false) then query depends on I(name) presence and value. + - If V(true), queries the node (that is, the docker daemon) the module communicates with. + - If V(true) then O(name) is ignored. + - If V(false) then query depends on O(name) presence and value. type: bool default: false @@ -79,8 +79,8 @@ RETURN = ''' nodes: description: - Facts representing the current state of the nodes. Matches the C(docker node inspect) output. - - Can contain multiple entries if more than one node provided in I(name), or I(name) is not provided. - - If I(name) contains a list of nodes, the output will provide information on all nodes registered + - Can contain multiple entries if more than one node provided in O(name), or O(name) is not provided. + - If O(name) contains a list of nodes, the output will provide information on all nodes registered at the swarm, including nodes that left the swarm but have not been removed from the cluster on swarm managers and nodes that are unreachable. returned: always diff --git a/ansible_collections/community/docker/plugins/modules/docker_plugin.py b/ansible_collections/community/docker/plugins/modules/docker_plugin.py index 9bb850665..e7242e8eb 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_plugin.py +++ b/ansible_collections/community/docker/plugins/modules/docker_plugin.py @@ -17,6 +17,8 @@ version_added: 1.3.0 description: - This module allows to install, delete, enable and disable Docker plugins. - Performs largely the same function as the C(docker plugin) CLI subcommand. +notes: + - The C(--grant-all-permissions) CLI flag is true by default in this module. extends_documentation_fragment: - community.docker.docker.api_documentation @@ -38,10 +40,10 @@ options: state: description: - - C(absent) remove the plugin. - - C(present) install the plugin, if it does not already exist. - - C(enable) enable the plugin. - - C(disable) disable the plugin. + - V(absent) remove the plugin. 
+ - V(present) install the plugin, if it does not already exist. + - V(enable) enable the plugin. + - V(disable) disable the plugin. default: present choices: - absent @@ -121,7 +123,7 @@ plugin: actions: description: - List of actions performed during task execution. - returned: when I(state!=absent) + returned: when O(state) is not V(absent) type: list ''' diff --git a/ansible_collections/community/docker/plugins/modules/docker_prune.py b/ansible_collections/community/docker/plugins/modules/docker_prune.py index 1557f85a4..1dfbf290e 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_prune.py +++ b/ansible_collections/community/docker/plugins/modules/docker_prune.py @@ -124,14 +124,14 @@ RETURN = ''' containers: description: - List of IDs of deleted containers. - returned: I(containers) is C(true) + returned: O(containers=true) type: list elements: str sample: [] containers_space_reclaimed: description: - Amount of reclaimed disk space from container pruning in bytes. - returned: I(containers) is C(true) + returned: O(containers=true) type: int sample: 0 @@ -139,14 +139,14 @@ containers_space_reclaimed: images: description: - List of IDs of deleted images. - returned: I(images) is C(true) + returned: O(images=true) type: list elements: str sample: [] images_space_reclaimed: description: - Amount of reclaimed disk space from image pruning in bytes. - returned: I(images) is C(true) + returned: O(images=true) type: int sample: 0 @@ -154,7 +154,7 @@ images_space_reclaimed: networks: description: - List of IDs of deleted networks. - returned: I(networks) is C(true) + returned: O(networks=true) type: list elements: str sample: [] @@ -163,14 +163,14 @@ networks: volumes: description: - List of IDs of deleted volumes. - returned: I(volumes) is C(true) + returned: O(volumes=true) type: list elements: str sample: [] volumes_space_reclaimed: description: - Amount of reclaimed disk space from volumes pruning in bytes. 
- returned: I(volumes) is C(true) + returned: O(volumes=true) type: int sample: 0 @@ -178,7 +178,7 @@ volumes_space_reclaimed: builder_cache_space_reclaimed: description: - Amount of reclaimed disk space from builder cache pruning in bytes. - returned: I(builder_cache) is C(true) + returned: O(builder_cache=true) type: int sample: 0 ''' diff --git a/ansible_collections/community/docker/plugins/modules/docker_secret.py b/ansible_collections/community/docker/plugins/modules/docker_secret.py index 546756a49..cf4324541 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_secret.py +++ b/ansible_collections/community/docker/plugins/modules/docker_secret.py @@ -18,7 +18,7 @@ description: - Create and remove Docker secrets in a Swarm environment. Similar to C(docker secret create) and C(docker secret rm). - Adds to the metadata of new secrets C(ansible_key), an encrypted hash representation of the data, which is then used in future runs to test if a secret has changed. If C(ansible_key) is not present, then a secret will not be updated - unless the I(force) option is set. + unless the O(force) option is set. - Updates to secrets are performed by removing the secret and creating it again. extends_documentation_fragment: @@ -37,20 +37,20 @@ options: data: description: - The value of the secret. - - Mutually exclusive with I(data_src). One of I(data) and I(data_src) is required if I(state=present). + - Mutually exclusive with O(data_src). One of O(data) and O(data_src) is required if O(state=present). type: str data_is_b64: description: - - If set to C(true), the data is assumed to be Base64 encoded and will be + - If set to V(true), the data is assumed to be Base64 encoded and will be decoded before being used. - - To use binary I(data), it is better to keep it Base64 encoded and let it + - To use binary O(data), it is better to keep it Base64 encoded and let it be decoded by this option. 
type: bool default: false data_src: description: - The file on the target from which to read the secret. - - Mutually exclusive with I(data). One of I(data) and I(data_src) is required if I(state=present). + - Mutually exclusive with O(data). One of O(data) and O(data_src) is required if O(state=present). type: path version_added: 1.10.0 labels: @@ -60,22 +60,22 @@ options: type: dict force: description: - - Use with state C(present) to always remove and recreate an existing secret. - - If C(true), an existing secret will be replaced, even if it has not changed. + - Use with O(state=present) to always remove and recreate an existing secret. + - If V(true), an existing secret will be replaced, even if it has not changed. type: bool default: false rolling_versions: description: - - If set to C(true), secrets are created with an increasing version number appended to their name. + - If set to V(true), secrets are created with an increasing version number appended to their name. - Adds a label containing the version number to the managed secrets with the name C(ansible_version). type: bool default: false version_added: 2.2.0 versions_to_keep: description: - - When using I(rolling_versions), the number of old versions of the secret to keep. + - When using O(rolling_versions), the number of old versions of the secret to keep. - Extraneous old secrets are deleted after the new one is created. - - Set to C(-1) to keep everything or to C(0) or C(1) to keep only the current one. + - Set to V(-1) to keep everything or to V(0) or V(1) to keep only the current one. type: int default: 5 version_added: 2.2.0 @@ -86,7 +86,7 @@ options: required: true state: description: - - Set to C(present), if the secret should exist, and C(absent), if it should not. + - Set to V(present), if the secret should exist, and V(absent), if it should not. type: str default: present choices: @@ -175,13 +175,13 @@ RETURN = ''' secret_id: description: - The ID assigned by Docker to the secret object. 
- returned: success and I(state) is C(present) + returned: success and O(state=present) type: str sample: 'hzehrmyjigmcp2gb6nlhmjqcv' secret_name: description: - The name of the created secret object. - returned: success and I(state) is C(present) + returned: success and O(state=present) type: str sample: 'awesome_secret' version_added: 2.2.0 diff --git a/ansible_collections/community/docker/plugins/modules/docker_stack.py b/ansible_collections/community/docker/plugins/modules/docker_stack.py index 98f4c3ad9..728bc5cfc 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_stack.py +++ b/ansible_collections/community/docker/plugins/modules/docker_stack.py @@ -18,12 +18,16 @@ description: - Manage docker stacks using the C(docker stack) command on the target node (see examples). extends_documentation_fragment: + - community.docker.docker.cli_documentation - community.docker.attributes + - community.docker.attributes.actiongroup_docker attributes: check_mode: support: none diff_mode: support: none + action_group: + version_added: 3.6.0 options: name: description: @@ -68,8 +72,8 @@ options: choices: ["always", "changed", "never"] absent_retries: description: - - If C(>0) and I(state) is C(absent) the module will retry up to - I(absent_retries) times to delete the stack until all the + - If larger than V(0) and O(state=absent) the module will retry up to + O(absent_retries) times to delete the stack until all the resources have been effectively deleted. If the last try still reports the stack as not completely removed the module will fail. @@ -77,11 +81,32 @@ options: default: 0 absent_retries_interval: description: - - Interval in seconds between consecutive I(absent_retries). + - Interval in seconds between consecutive O(absent_retries). 
type: int default: 1 + docker_cli: + version_added: 3.6.0 + docker_host: + version_added: 3.6.0 + tls_hostname: + version_added: 3.6.0 + api_version: + version_added: 3.6.0 + ca_path: + version_added: 3.6.0 + client_cert: + version_added: 3.6.0 + client_key: + version_added: 3.6.0 + tls: + version_added: 3.6.0 + validate_certs: + version_added: 3.6.0 + cli_context: + version_added: 3.6.0 requirements: + - Docker CLI tool C(docker) - jsondiff - pyyaml ''' @@ -128,10 +153,20 @@ EXAMPLES = ''' import json +import os import tempfile +import traceback + from ansible.module_utils.six import string_types from time import sleep +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.docker.plugins.module_utils.common_cli import ( + AnsibleModuleDockerClient, + DockerException, +) + try: from jsondiff import diff as json_diff HAS_JSONDIFF = True @@ -144,28 +179,16 @@ try: except ImportError: HAS_YAML = False -from ansible.module_utils.basic import AnsibleModule, os - -def docker_stack_services(module, stack_name): - docker_bin = module.get_bin_path('docker', required=True) - rc, out, err = module.run_command([docker_bin, - "stack", - "services", - stack_name, - "--format", - "{{.Name}}"]) - if err == "Nothing found in stack: %s\n" % stack_name: +def docker_stack_services(client, stack_name): + rc, out, err = client.call_cli("stack", "services", stack_name, "--format", "{{.Name}}") + if to_native(err) == "Nothing found in stack: %s\n" % stack_name: return [] - return out.strip().split('\n') + return to_native(out).strip().split('\n') -def docker_service_inspect(module, service_name): - docker_bin = module.get_bin_path('docker', required=True) - rc, out, err = module.run_command([docker_bin, - "service", - "inspect", - service_name]) +def docker_service_inspect(client, service_name): + rc, out, err = client.call_cli("service", "inspect", service_name) if rc != 0: return None else: @@ -173,45 +196,43 @@ def 
docker_service_inspect(module, service_name): return ret -def docker_stack_deploy(module, stack_name, compose_files): - docker_bin = module.get_bin_path('docker', required=True) - command = [docker_bin, "stack", "deploy"] - if module.params["prune"]: +def docker_stack_deploy(client, stack_name, compose_files): + command = ["stack", "deploy"] + if client.module.params["prune"]: command += ["--prune"] - if module.params["with_registry_auth"]: + if client.module.params["with_registry_auth"]: command += ["--with-registry-auth"] - if module.params["resolve_image"]: + if client.module.params["resolve_image"]: command += ["--resolve-image", - module.params["resolve_image"]] + client.module.params["resolve_image"]] for compose_file in compose_files: command += ["--compose-file", compose_file] command += [stack_name] - return module.run_command(command) + rc, out, err = client.call_cli(*command) + return rc, to_native(out), to_native(err) -def docker_stack_inspect(module, stack_name): +def docker_stack_inspect(client, stack_name): ret = {} - for service_name in docker_stack_services(module, stack_name): - ret[service_name] = docker_service_inspect(module, service_name) + for service_name in docker_stack_services(client, stack_name): + ret[service_name] = docker_service_inspect(client, service_name) return ret -def docker_stack_rm(module, stack_name, retries, interval): - docker_bin = module.get_bin_path('docker', required=True) - command = [docker_bin, "stack", "rm", stack_name] - - rc, out, err = module.run_command(command) +def docker_stack_rm(client, stack_name, retries, interval): + command = ["stack", "rm", stack_name] + rc, out, err = client.call_cli(*command) - while err != "Nothing found in stack: %s\n" % stack_name and retries > 0: + while to_native(err) != "Nothing found in stack: %s\n" % stack_name and retries > 0: sleep(interval) retries = retries - 1 - rc, out, err = module.run_command(command) - return rc, out, err + rc, out, err = client.call_cli(*command) + 
return rc, to_native(out), to_native(err) def main(): - module = AnsibleModule( + client = AnsibleModuleDockerClient( argument_spec={ 'name': dict(type='str', required=True), 'compose': dict(type='list', elements='raw', default=[]), @@ -222,87 +243,97 @@ def main(): 'absent_retries': dict(type='int', default=0), 'absent_retries_interval': dict(type='int', default=1) }, - supports_check_mode=False + supports_check_mode=False, ) if not HAS_JSONDIFF: - return module.fail_json(msg="jsondiff is not installed, try 'pip install jsondiff'") + return client.fail("jsondiff is not installed, try 'pip install jsondiff'") if not HAS_YAML: - return module.fail_json(msg="yaml is not installed, try 'pip install pyyaml'") - - state = module.params['state'] - compose = module.params['compose'] - name = module.params['name'] - absent_retries = module.params['absent_retries'] - absent_retries_interval = module.params['absent_retries_interval'] - - if state == 'present': - if not compose: - module.fail_json(msg=("compose parameter must be a list " - "containing at least one element")) - - compose_files = [] - for i, compose_def in enumerate(compose): - if isinstance(compose_def, dict): - compose_file_fd, compose_file = tempfile.mkstemp() - module.add_cleanup_file(compose_file) - with os.fdopen(compose_file_fd, 'w') as stack_file: - compose_files.append(compose_file) - stack_file.write(yaml_dump(compose_def)) - elif isinstance(compose_def, string_types): - compose_files.append(compose_def) - else: - module.fail_json(msg="compose element '%s' must be a string or a dictionary" % compose_def) - - before_stack_services = docker_stack_inspect(module, name) - - rc, out, err = docker_stack_deploy(module, name, compose_files) - - after_stack_services = docker_stack_inspect(module, name) - - if rc != 0: - module.fail_json(msg="docker stack up deploy command failed", - rc=rc, - stdout=out, stderr=err) - - before_after_differences = json_diff(before_stack_services, - after_stack_services) - for k 
in before_after_differences.keys(): - if isinstance(before_after_differences[k], dict): - before_after_differences[k].pop('UpdatedAt', None) - before_after_differences[k].pop('Version', None) - if not list(before_after_differences[k].keys()): - before_after_differences.pop(k) - - if not before_after_differences: - module.exit_json( - changed=False, - rc=rc, - stdout=out, - stderr=err) - else: - module.exit_json( - changed=True, - rc=rc, - stdout=out, - stderr=err, - stack_spec_diff=json_diff(before_stack_services, - after_stack_services, - dump=True)) + return client.fail("yaml is not installed, try 'pip install pyyaml'") + + try: + state = client.module.params['state'] + compose = client.module.params['compose'] + name = client.module.params['name'] + absent_retries = client.module.params['absent_retries'] + absent_retries_interval = client.module.params['absent_retries_interval'] + + if state == 'present': + if not compose: + client.fail("compose parameter must be a list containing at least one element") + + compose_files = [] + for i, compose_def in enumerate(compose): + if isinstance(compose_def, dict): + compose_file_fd, compose_file = tempfile.mkstemp() + client.module.add_cleanup_file(compose_file) + with os.fdopen(compose_file_fd, 'w') as stack_file: + compose_files.append(compose_file) + stack_file.write(yaml_dump(compose_def)) + elif isinstance(compose_def, string_types): + compose_files.append(compose_def) + else: + client.fail("compose element '%s' must be a string or a dictionary" % compose_def) + + before_stack_services = docker_stack_inspect(client, name) + + rc, out, err = docker_stack_deploy(client, name, compose_files) + + after_stack_services = docker_stack_inspect(client, name) - else: - if docker_stack_services(module, name): - rc, out, err = docker_stack_rm(module, name, absent_retries, absent_retries_interval) if rc != 0: - module.fail_json(msg="'docker stack down' command failed", - rc=rc, - stdout=out, stderr=err) + client.fail("docker 
stack up deploy command failed", rc=rc, stdout=out, stderr=err) + + before_after_differences = json_diff(before_stack_services, after_stack_services) + for k in before_after_differences.keys(): + if isinstance(before_after_differences[k], dict): + before_after_differences[k].pop('UpdatedAt', None) + before_after_differences[k].pop('Version', None) + if not list(before_after_differences[k].keys()): + before_after_differences.pop(k) + + if not before_after_differences: + client.module.exit_json( + changed=False, + rc=rc, + stdout=out, + stderr=err, + ) else: - module.exit_json(changed=True, - msg=out, rc=rc, - stdout=out, stderr=err) - module.exit_json(changed=False) + client.module.exit_json( + changed=True, + rc=rc, + stdout=out, + stderr=err, + stack_spec_diff=json_diff( + before_stack_services, + after_stack_services, + dump=True, + ), + ) + + else: + if docker_stack_services(client, name): + rc, out, err = docker_stack_rm(client, name, absent_retries, absent_retries_interval) + if rc != 0: + client.module.fail_json( + msg="'docker stack down' command failed", + rc=rc, + stdout=out, + stderr=err, + ) + else: + client.module.exit_json( + changed=True, + msg=out, + rc=rc, + stdout=out, + stderr=err, + ) + client.module.exit_json(changed=False) + except DockerException as e: + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) if __name__ == "__main__": diff --git a/ansible_collections/community/docker/plugins/modules/docker_stack_info.py b/ansible_collections/community/docker/plugins/modules/docker_stack_info.py index bf3bfbdbe..21ce20dd6 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_stack_info.py +++ b/ansible_collections/community/docker/plugins/modules/docker_stack_info.py @@ -13,20 +13,52 @@ DOCUMENTATION = ''' --- module: docker_stack_info author: "Jose Angel Munoz (@imjoseangel)" -short_description: Return information on a docker stack +short_description: Return information 
on all docker stacks description: - Retrieve information on docker stacks using the C(docker stack) command on the target node (see examples). +requirements: + - Docker CLI tool C(docker) extends_documentation_fragment: + - community.docker.docker.cli_documentation - community.docker.attributes + - community.docker.attributes.actiongroup_docker - community.docker.attributes.info_module +attributes: + action_group: + version_added: 3.6.0 +options: + docker_cli: + version_added: 3.6.0 + docker_host: + version_added: 3.6.0 + tls_hostname: + version_added: 3.6.0 + api_version: + version_added: 3.6.0 + ca_path: + version_added: 3.6.0 + client_cert: + version_added: 3.6.0 + client_key: + version_added: 3.6.0 + tls: + version_added: 3.6.0 + validate_certs: + version_added: 3.6.0 + cli_context: + version_added: 3.6.0 +seealso: + - module: community.docker.docker_stack_task_info + description: >- + To retrieve detailed information about the services under a specific + stack use the M(community.docker.docker_stack_task_info) module. ''' RETURN = ''' results: - description: | - List of dictionaries containing the list of stacks or tasks associated - to a stack name. 
+ description: + - List of dictionaries containing the list of stacks on the target node sample: - {"name":"grafana","namespace":"default","orchestrator":"Kubernetes","services":"2"} returned: always @@ -45,7 +77,14 @@ EXAMPLES = ''' ''' import json -from ansible.module_utils.basic import AnsibleModule +import traceback + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.docker.plugins.module_utils.common_cli import ( + AnsibleModuleDockerClient, + DockerException, +) def docker_stack_list(module): @@ -57,31 +96,23 @@ def docker_stack_list(module): def main(): - module = AnsibleModule( + client = AnsibleModuleDockerClient( argument_spec={ }, - supports_check_mode=True + supports_check_mode=True, ) - rc, out, err = docker_stack_list(module) - - if rc != 0: - module.fail_json(msg="Error running docker stack. {0}".format(err), - rc=rc, stdout=out, stderr=err) - else: - if out: - ret = list( - json.loads(outitem) - for outitem in out.splitlines()) - - else: - ret = [] - - module.exit_json(changed=False, - rc=rc, - stdout=out, - stderr=err, - results=ret) + try: + rc, ret, stderr = client.call_cli_json_stream('stack', 'ls', '--format={{json .}}', check_rc=True) + client.module.exit_json( + changed=False, + rc=rc, + stdout='\n'.join([json.dumps(entry) for entry in ret]), + stderr=to_native(stderr).strip(), + results=ret, + ) + except DockerException as e: + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) if __name__ == "__main__": diff --git a/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py b/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py index e3693bc54..72076310a 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py +++ b/ansible_collections/community/docker/plugins/modules/docker_stack_task_info.py @@ -18,23 +18,50 @@ description: - Retrieve information 
on docker stacks tasks using the C(docker stack) command on the target node (see examples). extends_documentation_fragment: + - community.docker.docker.cli_documentation - community.docker.attributes + - community.docker.attributes.actiongroup_docker - community.docker.attributes.info_module +attributes: + action_group: + version_added: 3.6.0 options: name: description: - Stack name. type: str required: true + docker_cli: + version_added: 3.6.0 + docker_host: + version_added: 3.6.0 + tls_hostname: + version_added: 3.6.0 + api_version: + version_added: 3.6.0 + ca_path: + version_added: 3.6.0 + client_cert: + version_added: 3.6.0 + client_key: + version_added: 3.6.0 + tls: + version_added: 3.6.0 + validate_certs: + version_added: 3.6.0 + cli_context: + version_added: 3.6.0 +requirements: + - Docker CLI tool C(docker) ''' RETURN = ''' results: - description: | - List of dictionaries containing the list of tasks associated - to a stack name. - sample: > - [{"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""}] + description: + - List of dictionaries containing the list of tasks associated + to a stack name. 
+ sample: + - {"CurrentState":"Running","DesiredState":"Running","Error":"","ID":"7wqv6m02ugkw","Image":"busybox","Name":"test_stack.1","Node":"swarm","Ports":""} returned: always type: list elements: dict @@ -52,7 +79,14 @@ EXAMPLES = ''' ''' import json -from ansible.module_utils.basic import AnsibleModule +import traceback + +from ansible.module_utils.common.text.converters import to_native + +from ansible_collections.community.docker.plugins.module_utils.common_cli import ( + AnsibleModuleDockerClient, + DockerException, +) def docker_stack_task(module, stack_name): @@ -64,34 +98,25 @@ def docker_stack_task(module, stack_name): def main(): - module = AnsibleModule( + client = AnsibleModuleDockerClient( argument_spec={ 'name': dict(type='str', required=True) }, - supports_check_mode=True + supports_check_mode=True, ) - name = module.params['name'] - - rc, out, err = docker_stack_task(module, name) - - if rc != 0: - module.fail_json(msg="Error running docker stack. {0}".format(err), - rc=rc, stdout=out, stderr=err) - else: - if out: - ret = list( - json.loads(outitem) - for outitem in out.splitlines()) - - else: - ret = [] - - module.exit_json(changed=False, - rc=rc, - stdout=out, - stderr=err, - results=ret) + try: + name = client.module.params['name'] + rc, ret, stderr = client.call_cli_json_stream('stack', 'ps', name, '--format={{json .}}', check_rc=True) + client.module.exit_json( + changed=False, + rc=rc, + stdout='\n'.join([json.dumps(entry) for entry in ret]), + stderr=to_native(stderr).strip(), + results=ret, + ) + except DockerException as e: + client.fail('An unexpected Docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc()) if __name__ == "__main__": diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm.py b/ansible_collections/community/docker/plugins/modules/docker_swarm.py index 69b88f583..dc04c0a40 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_swarm.py +++ 
b/ansible_collections/community/docker/plugins/modules/docker_swarm.py @@ -32,11 +32,11 @@ options: description: - Externally reachable address advertised to other nodes. - This can either be an address/port combination - in the form C(192.168.1.1:4567), or an interface followed by a - port number, like C(eth0:4567). + in the form V(192.168.1.1:4567), or an interface followed by a + port number, like V(eth0:4567). - If the port number is omitted, the port number from the listen address is used. - - If I(advertise_addr) is not specified, it will be automatically + - If O(advertise_addr) is not specified, it will be automatically detected when possible. - Only used when swarm is initialised or joined. Because of this it's not considered for idempotency checking. @@ -60,8 +60,8 @@ options: description: - Listen address used for inter-manager communication. - This can either be an address/port combination in the form - C(192.168.1.1:4567), or an interface followed by a port number, - like C(eth0:4567). + V(192.168.1.1:4567), or an interface followed by a port number, + like V(eth0:4567). - If the port number is omitted, the default swarm listening port is used. - Only used when swarm is initialised or joined. Because of this it's not @@ -70,16 +70,16 @@ options: default: 0.0.0.0:2377 force: description: - - Use with state C(present) to force creating a new Swarm, even if already part of one. - - Use with state C(absent) to Leave the swarm even if this node is a manager. + - Use with state V(present) to force creating a new Swarm, even if already part of one. + - Use with state V(absent) to Leave the swarm even if this node is a manager. type: bool default: false state: description: - - Set to C(present), to create/update a new cluster. - - Set to C(join), to join an existing cluster. - - Set to C(absent), to leave an existing cluster. - - Set to C(remove), to remove an absent node from the cluster. + - Set to V(present), to create/update a new cluster. 
+ - Set to V(join), to join an existing cluster. + - Set to V(absent), to leave an existing cluster. + - Set to V(remove), to remove an absent node from the cluster. Note that removing requires Docker SDK for Python >= 2.4.0. - M(community.docker.docker_node) can be used to demote a manager before removal. type: str @@ -92,35 +92,35 @@ options: node_id: description: - Swarm id of the node to remove. - - Used with I(state=remove). + - Used with O(state=remove). type: str join_token: description: - Swarm token used to join a swarm cluster. - - Used with I(state=join). + - Used with O(state=join). - If this value is specified, the corresponding value in the return values will be censored by Ansible. This is a side-effect of this value not being logged. type: str remote_addrs: description: - Remote address of one or more manager nodes of an existing Swarm to connect to. - - Used with I(state=join). + - Used with O(state=join). type: list elements: str task_history_retention_limit: description: - Maximum number of tasks history stored. - - Docker default value is C(5). + - Docker default value is V(5). type: int snapshot_interval: description: - Number of logs entries between snapshot. - - Docker default value is C(10000). + - Docker default value is V(10000). type: int keep_old_snapshots: description: - Number of snapshots to keep beyond the current snapshot. - - Docker default value is C(0). + - Docker default value is V(0). type: int log_entries_for_slow_followers: description: @@ -129,23 +129,23 @@ options: heartbeat_tick: description: - Amount of ticks (in seconds) between each heartbeat. - - Docker default value is C(1) seconds. + - Docker default value is V(1) seconds. type: int election_tick: description: - Amount of ticks (in seconds) needed without a leader to trigger a new election. - - Docker default value is C(10) seconds. + - Docker default value is V(10) seconds. 
type: int dispatcher_heartbeat_period: description: - The delay (in nanoseconds) for an agent to send a heartbeat to the dispatcher. - - Docker default value is 5 seconds, which corresponds to a value of C(5000000000). + - Docker default value is 5 seconds, which corresponds to a value of V(5000000000). # DefaultHeartBeatPeriod in https://github.com/moby/moby/blob/master/vendor/github.com/moby/swarmkit/v2/manager/dispatcher/dispatcher.go#L32 type: int node_cert_expiry: description: - Automatic expiry for nodes certificates, given in nanoseconds. - - Docker default value is 90 days, which corresponds to a value of C(7776000000000000). + - Docker default value is 90 days, which corresponds to a value of V(7776000000000000). # DefaultNodeCertExpiration in https://github.com/moby/moby/blob/master/vendor/github.com/moby/swarmkit/v2/ca/certificates.go#L56 type: int name: @@ -175,13 +175,13 @@ options: description: - An integer whose purpose is to force swarm to generate a new signing CA certificate and key, if none have been specified. - - Docker default value is C(0). + - Docker default value is V(0). - Requires API version >= 1.30. type: int autolock_managers: description: - If set, generate a key and use it to lock data stored on the managers. - - Docker default value is C(false). + - Docker default value is V(false). - M(community.docker.docker_swarm_info) can be used to retrieve the unlock key. type: bool rotate_worker_token: @@ -195,18 +195,20 @@ options: data_path_addr: description: - Address or interface to use for data path traffic. - - This can either be an address in the form C(192.168.1.1), or an interface, - like C(eth0). + - This can either be an address in the form V(192.168.1.1), or an interface, + like V(eth0). - Only used when swarm is initialised or joined. Because of this it is not considered for idempotency checking. + - Requires API version >= 1.30. type: str version_added: 2.5.0 data_path_port: description: - Port to use for data path traffic. 
- - This needs to be a port number like C(9789). + - This needs to be a port number like V(9789). - Only used when swarm is initialised. Because of this it is not considered for idempotency checking. + - Requires API version >= 1.40. type: int version_added: 3.1.0 @@ -264,7 +266,7 @@ EXAMPLES = ''' RETURN = ''' swarm_facts: - description: Informations about swarm. + description: Information about swarm. returned: success type: dict contains: @@ -276,8 +278,8 @@ swarm_facts: Worker: description: - Token to join the cluster as a new *worker* node. - - "B(Note:) if this value has been specified as I(join_token), the value here will not - be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). If you pass I(join_token), + - "B(Note:) if this value has been specified as O(join_token), the value here will not + be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). If you pass O(join_token), make sure your playbook/role does not depend on this return value!" returned: success type: str @@ -285,16 +287,16 @@ swarm_facts: Manager: description: - Token to join the cluster as a new *manager* node. - - "B(Note:) if this value has been specified as I(join_token), the value here will not - be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). If you pass I(join_token), + - "B(Note:) if this value has been specified as O(join_token), the value here will not + be the token, but C(VALUE_SPECIFIED_IN_NO_LOG_PARAMETER). If you pass O(join_token), make sure your playbook/role does not depend on this return value!" returned: success type: str example: SWMTKN-1--xxxxx UnlockKey: - description: The swarm unlock-key if I(autolock_managers) is C(true). - returned: on success if I(autolock_managers) is C(true) - and swarm is initialised, or if I(autolock_managers) has changed. + description: The swarm unlock-key if O(autolock_managers=true). + returned: on success if O(autolock_managers=true) + and swarm is initialised, or if O(autolock_managers) has changed. 
type: str example: SWMKEY-1-xxx @@ -534,7 +536,6 @@ class SwarmManager(DockerBaseClass): init_arguments = { 'advertise_addr': self.parameters.advertise_addr, 'listen_addr': self.parameters.listen_addr, - 'data_path_addr': self.parameters.data_path_addr, 'force_new_cluster': self.force, 'swarm_spec': self.parameters.spec, } @@ -542,6 +543,8 @@ class SwarmManager(DockerBaseClass): init_arguments['default_addr_pool'] = self.parameters.default_addr_pool if self.parameters.subnet_size is not None: init_arguments['subnet_size'] = self.parameters.subnet_size + if self.parameters.data_path_addr is not None: + init_arguments['data_path_addr'] = self.parameters.data_path_addr if self.parameters.data_path_port is not None: init_arguments['data_path_port'] = self.parameters.data_path_port try: @@ -595,11 +598,16 @@ class SwarmManager(DockerBaseClass): self.results['actions'].append("This node is already part of a swarm.") return if not self.check_mode: + join_arguments = { + 'remote_addrs': self.parameters.remote_addrs, + 'join_token': self.parameters.join_token, + 'listen_addr': self.parameters.listen_addr, + 'advertise_addr': self.parameters.advertise_addr, + } + if self.parameters.data_path_addr is not None: + join_arguments['data_path_addr'] = self.parameters.data_path_addr try: - self.client.join_swarm( - remote_addrs=self.parameters.remote_addrs, join_token=self.parameters.join_token, - listen_addr=self.parameters.listen_addr, advertise_addr=self.parameters.advertise_addr, - data_path_addr=self.parameters.data_path_addr) + self.client.join_swarm(**join_arguments) except APIError as exc: self.client.fail("Can not join the Swarm Cluster: %s" % to_native(exc)) self.results['actions'].append("New node is added to swarm cluster") diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py b/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py index df1e5af95..6c6008dcf 100644 --- 
a/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py +++ b/ansible_collections/community/docker/plugins/modules/docker_swarm_info.py @@ -77,10 +77,10 @@ options: default: false verbose_output: description: - - When set to C(true) and I(nodes), I(services) or I(tasks) is set to C(true), then the module output will + - When set to V(true) and O(nodes), O(services), or O(tasks) is set to V(true), then the module output will contain verbose information about objects matching the full output of API method. - For details see the documentation of your version of Docker API at U(https://docs.docker.com/engine/api/). - - The verbose output in this module contains only subset of information returned by I(_info) module + - The verbose output in this module contains only subset of information returned by this info module for each type of the objects. type: bool default: false @@ -139,21 +139,21 @@ EXAMPLES = ''' RETURN = ''' can_talk_to_docker: description: - - Will be C(true) if the module can talk to the docker daemon. + - Will be V(true) if the module can talk to the docker daemon. returned: both on success and on error type: bool docker_swarm_active: description: - - Will be C(true) if the module can talk to the docker daemon, + - Will be V(true) if the module can talk to the docker daemon, and the docker daemon is in Swarm mode. returned: both on success and on error type: bool docker_swarm_manager: description: - - Will be C(true) if the module can talk to the docker daemon, + - Will be V(true) if the module can talk to the docker daemon, the docker daemon is in Swarm mode, and the current node is a manager node. - - Only if this one is C(true), the module will not fail. + - Only if this one is V(true), the module will not fail. returned: both on success and on error type: bool swarm_facts: @@ -165,30 +165,30 @@ swarm_facts: swarm_unlock_key: description: - Contains the key needed to unlock the swarm. - returned: When I(unlock_key) is C(true). 
+ returned: When O(unlock_key=true). type: str nodes: description: - List of dict objects containing the basic information about each volume. - Keys matches the C(docker node ls) output unless I(verbose_output=true). - See description for I(verbose_output). - returned: When I(nodes) is C(true) + Keys matches the C(docker node ls) output unless O(verbose_output=true). + See description for O(verbose_output). + returned: When O(nodes=true) type: list elements: dict services: description: - List of dict objects containing the basic information about each volume. - Keys matches the C(docker service ls) output unless I(verbose_output=true). - See description for I(verbose_output). - returned: When I(services) is C(true) + Keys matches the C(docker service ls) output unless O(verbose_output=true). + See description for O(verbose_output). + returned: When O(services=true) type: list elements: dict tasks: description: - List of dict objects containing the basic information about each volume. - Keys matches the C(docker service ps) output unless I(verbose_output=true). - See description for I(verbose_output). - returned: When I(tasks) is C(true) + Keys matches the C(docker service ps) output unless O(verbose_output=true). + See description for O(verbose_output). + returned: When O(tasks=true) type: list elements: dict diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py b/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py index 564234cb5..95cc10366 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py +++ b/ansible_collections/community/docker/plugins/modules/docker_swarm_service.py @@ -64,7 +64,7 @@ options: required: true filename: description: - - Name of the file containing the config. Defaults to the I(config_name) if not specified. + - Name of the file containing the config. Defaults to the O(configs[].config_name) if not specified. 
type: str uid: description: @@ -76,7 +76,7 @@ options: type: str mode: description: - - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)). + - File access mode inside the container. Must be an octal number (like V(0644) or V(0444)). type: int container_labels: description: @@ -114,7 +114,7 @@ options: - List or dictionary of the service environment variables. - If passed a list each items need to be in the format of C(KEY=VALUE). - If passed a dictionary values which might be parsed as numbers, - booleans or other types by the YAML parser must be quoted (for example C("true")) + booleans or other types by the YAML parser must be quoted (for example V("true")) in order to avoid data loss. - Corresponds to the C(--env) option of C(docker service create). type: raw @@ -123,7 +123,7 @@ options: - List of paths to files, present on the target, containing environment variables C(FOO=BAR). - The order of the list is significant in determining the value assigned to a variable that shows up more than once. - - If variable also present in I(env), then I(env) value will override. + - If variable also present in O(env), then O(env) value will override. type: list elements: path force_update: @@ -143,14 +143,15 @@ options: - Configure a check that is run to determine whether or not containers for this service are "healthy". See the docs for the L(HEALTHCHECK Dockerfile instruction,https://docs.docker.com/engine/reference/builder/#healthcheck) for details on how healthchecks work. - - "I(interval), I(timeout) and I(start_period) are specified as durations. They accept duration as a string in a format - that look like: C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + - "O(healthcheck.interval), O(healthcheck.timeout), and O(healthcheck.start_period) are specified as durations. + They accept duration as a string in a format that look like: V(5h34m56s), V(1m30s), and so on. 
+ The supported units are V(us), V(ms), V(s), V(m) and V(h)." type: dict suboptions: test: description: - Command to run to check health. - - Must be either a string or a list. If it is a list, the first item must be one of C(NONE), C(CMD) or C(CMD-SHELL). + - Must be either a string or a list. If it is a list, the first item must be one of V(NONE), V(CMD) or V(CMD-SHELL). type: raw interval: description: @@ -201,15 +202,15 @@ options: suboptions: cpus: description: - - Service CPU limit. C(0) equals no limit. + - Service CPU limit. V(0) equals no limit. - Corresponds to the C(--limit-cpu) option of C(docker service create). type: float memory: description: - "Service memory limit in format C(<number>[<unit>]). Number is a positive integer. - Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." - - C(0) equals no limit. + Unit can be V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." + - V(0) equals no limit. - Omitting the unit defaults to bytes. - Corresponds to the C(--limit-memory) option of C(docker service create). type: str @@ -249,7 +250,7 @@ options: source: description: - Mount source (for example a volume name or a host path). - - Must be specified if I(type) is not C(tmpfs). + - Must be specified if O(mounts[].type) is not V(tmpfs). type: str target: description: @@ -259,7 +260,7 @@ options: type: description: - The mount type. - - Note that C(npipe) is only supported by Docker for Windows. Also note that C(npipe) was added in Ansible 2.9. + - Note that V(npipe) is only supported by Docker for Windows. Also note that V(npipe) was added in Ansible 2.9. type: str default: bind choices: @@ -278,7 +279,7 @@ options: propagation: description: - The propagation mode to use. - - Can only be used when I(type) is C(bind). + - Can only be used when O(mounts[].type=bind). 
type: str choices: - shared @@ -290,12 +291,12 @@ options: no_copy: description: - Disable copying of data from a container when a volume is created. - - Can only be used when I(type) is C(volume). + - Can only be used when O(mounts[].type=volume). type: bool driver_config: description: - Volume driver configuration. - - Can only be used when I(type) is C(volume). + - Can only be used when O(mounts[].type=volume). suboptions: name: description: @@ -309,14 +310,14 @@ options: tmpfs_size: description: - "Size of the tmpfs mount in format C(<number>[<unit>]). Number is a positive integer. - Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." - - Can only be used when I(type) is C(tmpfs). + Unit can be V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." + - Can only be used when O(mounts[].type=tmpfs). type: str tmpfs_mode: description: - File mode of the tmpfs in octal. - - Can only be used when I(type) is C(tmpfs). + - Can only be used when O(mounts[].type=tmpfs). type: int name: description: @@ -327,8 +328,8 @@ options: networks: description: - List of the service networks names or dictionaries. - - When passed dictionaries valid sub-options are I(name), which is required, and - I(aliases) and I(options). + - When passed dictionaries valid sub-options are C(name), which is required, and + C(aliases) and C(options). - Prior to API version 1.29, updating and removing networks is not supported. If changes are made the service will then be removed and recreated. - Corresponds to the C(--network) option of C(docker service create). @@ -399,9 +400,9 @@ options: type: bool replicas: description: - - Number of containers instantiated in the service. Valid only if I(mode) is C(replicated). - - If set to C(-1), and service is not present, service replicas will be set to C(1). 
- - If set to C(-1), and service is present, service replicas will be unchanged. + - Number of containers instantiated in the service. Valid only if O(mode=replicated). + - If set to V(-1), and service is not present, service replicas will be set to V(1). + - If set to V(-1), and service is present, service replicas will be unchanged. - Corresponds to the C(--replicas) option of C(docker service create). type: int default: -1 @@ -411,15 +412,15 @@ options: suboptions: cpus: description: - - Service CPU reservation. C(0) equals no reservation. + - Service CPU reservation. V(0) equals no reservation. - Corresponds to the C(--reserve-cpu) option of C(docker service create). type: float memory: description: - "Service memory reservation in format C(<number>[<unit>]). Number is a positive integer. - Unit can be C(B) (byte), C(K) (kibibyte, 1024B), C(M) (mebibyte), C(G) (gibibyte), - C(T) (tebibyte), or C(P) (pebibyte)." - - C(0) equals no reservation. + Unit can be V(B) (byte), V(K) (kibibyte, 1024B), V(M) (mebibyte), V(G) (gibibyte), + V(T) (tebibyte), or V(P) (pebibyte)." + - V(0) equals no reservation. - Omitting the unit defaults to bytes. - Corresponds to the C(--reserve-memory) option of C(docker service create). type: str @@ -447,7 +448,7 @@ options: description: - Delay between restarts. - "Accepts a a string in a format that look like: - C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + V(5h34m56s), V(1m30s) etc. The supported units are V(us), V(ms), V(s), V(m) and V(h)." - Corresponds to the C(--restart-delay) option of C(docker service create). type: str max_attempts: @@ -459,7 +460,7 @@ options: description: - Restart policy evaluation window. - "Accepts a string in a format that look like: - C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + V(5h34m56s), V(1m30s) etc. The supported units are V(us), V(ms), V(s), V(m) and V(h)." 
- Corresponds to the C(--restart-window) option of C(docker service create). type: str type: dict @@ -477,7 +478,7 @@ options: description: - Delay between task rollbacks. - "Accepts a string in a format that look like: - C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + V(5h34m56s), V(1m30s) etc. The supported units are V(us), V(ms), V(s), V(m) and V(h)." - Corresponds to the C(--rollback-delay) option of C(docker service create). - Requires API version >= 1.28. type: str @@ -494,7 +495,7 @@ options: description: - Duration after each task rollback to monitor for failure. - "Accepts a string in a format that look like: - C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + V(5h34m56s), V(1m30s) etc. The supported units are V(us), V(ms), V(s), V(m) and V(h)." - Corresponds to the C(--rollback-monitor) option of C(docker service create). - Requires API version >= 1.28. type: str @@ -529,7 +530,7 @@ options: required: true filename: description: - - Name of the file containing the secret. Defaults to the I(secret_name) if not specified. + - Name of the file containing the secret. Defaults to the O(secrets[].secret_name) if not specified. - Corresponds to the C(target) key of C(docker service create --secret). type: str uid: @@ -542,12 +543,12 @@ options: type: str mode: description: - - File access mode inside the container. Must be an octal number (like C(0644) or C(0444)). + - File access mode inside the container. Must be an octal number (like V(0644) or V(0444)). type: int state: description: - - C(absent) - A service matching the specified name will be removed and have its tasks stopped. - - C(present) - Asserts the existence of a service matching the name and provided configuration parameters. + - V(absent) - A service matching the specified name will be removed and have its tasks stopped. 
+ - V(present) - Asserts the existence of a service matching the name and provided configuration parameters. Unspecified configuration parameters will be set to docker defaults. type: str default: present @@ -558,7 +559,7 @@ options: description: - Time to wait before force killing a container. - "Accepts a duration as a string in a format that look like: - C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + V(5h34m56s), V(1m30s) etc. The supported units are V(us), V(ms), V(s), V(m) and V(h)." - Corresponds to the C(--stop-grace-period) option of C(docker service create). type: str stop_signal: @@ -584,14 +585,14 @@ options: description: - Rolling update delay. - "Accepts a string in a format that look like: - C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + V(5h34m56s), V(1m30s) etc. The supported units are V(us), V(ms), V(s), V(m) and V(h)." - Corresponds to the C(--update-delay) option of C(docker service create). type: str failure_action: description: - Action to take in case of container failure. - Corresponds to the C(--update-failure-action) option of C(docker service create). - - Usage of I(rollback) requires API version >= 1.29. + - Usage of V(rollback) requires API version >= 1.29. type: str choices: - continue @@ -601,7 +602,7 @@ options: description: - Time to monitor updated tasks for failures. - "Accepts a string in a format that look like: - C(5h34m56s), C(1m30s) etc. The supported units are C(us), C(ms), C(s), C(m) and C(h)." + V(5h34m56s), V(1m30s) etc. The supported units are V(us), V(ms), V(s), V(m) and V(h)." - Corresponds to the C(--update-monitor) option of C(docker service create). type: str max_failure_ratio: @@ -619,7 +620,7 @@ options: user: description: - Sets the username or UID used for the specified command. - - Before Ansible 2.8, the default value for this option was C(root). + - Before Ansible 2.8, the default value for this option was V(root). 
- The default has been removed so that the user defined in the image is used if no user is specified here. - Corresponds to the C(--user) option of C(docker service create). type: str @@ -648,7 +649,7 @@ requirements: - "Docker API >= 1.25" notes: - "Images will only resolve to the latest digest when using Docker API >= 1.30 and Docker SDK for Python >= 3.2.0. - When using older versions use C(force_update: true) to trigger the swarm to resolve a new image." + When using older versions use O(force_update=true) to trigger the swarm to resolve a new image." ''' RETURN = ''' @@ -661,7 +662,7 @@ swarm_service: - Note that facts are not part of registered vars but accessible directly. - Note that before Ansible 2.7.9, the return variable was documented as C(ansible_swarm_service), while the module actually returned a variable called C(ansible_docker_service). The variable - was renamed to C(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0. + was renamed to RV(swarm_service) in both code and documentation for Ansible 2.7.9 and Ansible 2.8.0. In Ansible 2.7.x, the old name C(ansible_docker_service) can still be used. 
sample: '{ "args": [ @@ -1148,7 +1149,7 @@ def has_list_changed(new_list, old_list, sort_lists=True, sort_key=None): else: zip_data = zip(new_list, old_list) for new_item, old_item in zip_data: - is_same_type = type(new_item) == type(old_item) + is_same_type = type(new_item) == type(old_item) # noqa: E721, pylint: disable=unidiomatic-typecheck if not is_same_type: if isinstance(new_item, string_types) and isinstance(old_item, string_types): # Even though the types are different between these items, diff --git a/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py b/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py index ebe8a8e10..0a9dd56c8 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py +++ b/ansible_collections/community/docker/plugins/modules/docker_swarm_service_info.py @@ -58,7 +58,7 @@ exists: service: description: - A dictionary representing the current state of the service. Matches the C(docker service inspect) output. - - Will be C(none) if service does not exist. + - Will be V(none) if service does not exist. returned: always type: dict ''' diff --git a/ansible_collections/community/docker/plugins/modules/docker_volume.py b/ansible_collections/community/docker/plugins/modules/docker_volume.py index 09b1d386b..f282cafd0 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_volume.py +++ b/ansible_collections/community/docker/plugins/modules/docker_volume.py @@ -38,7 +38,7 @@ options: driver: description: - - Specify the type of volume. Docker provides the C(local) driver, but 3rd party drivers can also be used. + - Specify the type of volume. Docker provides the V(local) driver, but 3rd party drivers can also be used. type: str default: local @@ -56,13 +56,13 @@ options: recreate: description: - - Controls when a volume will be recreated when I(state) is C(present). 
Please + - Controls when a volume will be recreated when O(state=present). Please note that recreating an existing volume will cause B(any data in the existing volume to be lost!) The volume will be deleted and a new volume with the same name will be created. - - The value C(always) forces the volume to be always recreated. - - The value C(never) makes sure the volume will not be recreated. - - The value C(options-changed) makes sure the volume will be recreated if the volume + - The value V(always) forces the volume to be always recreated. + - The value V(never) makes sure the volume will not be recreated. + - The value V(options-changed) makes sure the volume will be recreated if the volume already exist and the driver, driver options or labels differ. type: str default: never @@ -73,8 +73,8 @@ options: state: description: - - C(absent) deletes the volume. - - C(present) creates the volume, if it does not already exist. + - V(absent) deletes the volume. + - V(present) creates the volume, if it does not already exist. type: str default: present choices: @@ -214,7 +214,7 @@ class DockerVolumeManager(object): parameter=value, active=self.existing_volume['Options'].get(key)) if self.parameters.labels: - existing_labels = self.existing_volume.get('Labels', {}) + existing_labels = self.existing_volume.get('Labels') or {} for label in self.parameters.labels: if existing_labels.get(label) != self.parameters.labels.get(label): differences.add('labels.%s' % label, diff --git a/ansible_collections/community/docker/plugins/modules/docker_volume_info.py b/ansible_collections/community/docker/plugins/modules/docker_volume_info.py index 100010ba4..9345d2e6e 100644 --- a/ansible_collections/community/docker/plugins/modules/docker_volume_info.py +++ b/ansible_collections/community/docker/plugins/modules/docker_volume_info.py @@ -63,7 +63,7 @@ exists: volume: description: - Volume inspection results for the affected volume. - - Will be C(none) if volume does not exist. 
+ - Will be V(none) if volume does not exist. returned: success type: dict sample: '{ |